Total coverage: 236913 (13%) of 1872920
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Bluetooth virtual HCI driver
 *
 *  Copyright (C) 2000-2001  Qualcomm Incorporated
 *  Copyright (C) 2002-2003  Maxim Krasnyansky <maxk@qualcomm.com>
 *  Copyright (C) 2004-2006  Marcel Holtmann <marcel@holtmann.org>
 */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define VERSION "1.5"

static bool amp;

struct vhci_data {
	struct hci_dev *hdev;

	wait_queue_head_t read_wait;
	struct sk_buff_head readq;

	struct mutex open_mutex;
	struct delayed_work open_timeout;
	struct work_struct suspend_work;

	bool suspended;
	bool wakeup;
	__u16 msft_opcode;
	bool aosp_capable;
	atomic_t initialized;
};

static int vhci_open_dev(struct hci_dev *hdev)
{
	return 0;
}

static int vhci_close_dev(struct hci_dev *hdev)
{
	struct vhci_data *data = hci_get_drvdata(hdev);

	skb_queue_purge(&data->readq);

	return 0;
}

static int vhci_flush(struct hci_dev *hdev)
{
	struct vhci_data *data = hci_get_drvdata(hdev);

	skb_queue_purge(&data->readq);

	return 0;
}

static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct vhci_data *data = hci_get_drvdata(hdev);

	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	skb_queue_tail(&data->readq, skb);

	if (atomic_read(&data->initialized))
		wake_up_interruptible(&data->read_wait);
	return 0;
}

static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id)
{
	*data_path_id = 0;
	return 0;
}

static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type,
				      struct bt_codec *codec, __u8 *vnd_len,
				      __u8 **vnd_data)
{
	if (type != ESCO_LINK)
		return -EINVAL;

	*vnd_len = 0;
	*vnd_data = NULL;
	return 0;
}

static bool vhci_wakeup(struct hci_dev *hdev)
{
	struct vhci_data *data = hci_get_drvdata(hdev);

	return data->wakeup;
}

static ssize_t force_suspend_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct vhci_data *data = file->private_data;
	char buf[3];

	buf[0] = data->suspended ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static void vhci_suspend_work(struct work_struct *work)
{
	struct vhci_data *data = container_of(work, struct vhci_data,
					      suspend_work);

	if (data->suspended)
		hci_suspend_dev(data->hdev);
	else
		hci_resume_dev(data->hdev);
}

static ssize_t force_suspend_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct vhci_data *data = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (data->suspended == enable)
		return -EALREADY;

	data->suspended = enable;

	schedule_work(&data->suspend_work);

	return count;
}

static const struct file_operations force_suspend_fops = {
	.open		= simple_open,
	.read		= force_suspend_read,
	.write		= force_suspend_write,
	.llseek		= default_llseek,
};

static ssize_t force_wakeup_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct vhci_data *data = file->private_data;
	char buf[3];

	buf[0] = data->wakeup ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_wakeup_write(struct file *file,
				  const char __user *user_buf, size_t count,
				  loff_t *ppos)
{
	struct vhci_data *data = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (data->wakeup == enable)
		return -EALREADY;

	data->wakeup = enable;

	return count;
}

static const struct file_operations force_wakeup_fops = {
	.open		= simple_open,
	.read		= force_wakeup_read,
	.write		= force_wakeup_write,
	.llseek		= default_llseek,
};

static int msft_opcode_set(void *data, u64 val)
{
	struct vhci_data *vhci = data;

	if (val > 0xffff || hci_opcode_ogf(val) != 0x3f)
		return -EINVAL;

	if (vhci->msft_opcode)
		return -EALREADY;

	vhci->msft_opcode = val;

	return 0;
}

static int msft_opcode_get(void *data, u64 *val)
{
	struct vhci_data *vhci = data;

	*val = vhci->msft_opcode;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set,
			 "%llu\n");

static ssize_t aosp_capable_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct vhci_data *vhci = file->private_data;
	char buf[3];

	buf[0] = vhci->aosp_capable ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t aosp_capable_write(struct file *file,
				  const char __user *user_buf, size_t count,
				  loff_t *ppos)
{
	struct vhci_data *vhci = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (!enable)
		return -EINVAL;

	if (vhci->aosp_capable)
		return -EALREADY;

	vhci->aosp_capable = enable;

	return count;
}

static const struct file_operations aosp_capable_fops = {
	.open		= simple_open,
	.read		= aosp_capable_read,
	.write		= aosp_capable_write,
	.llseek		= default_llseek,
};

static int vhci_setup(struct hci_dev *hdev)
{
	struct vhci_data *vhci = hci_get_drvdata(hdev);

	if (vhci->msft_opcode)
		hci_set_msft_opcode(hdev, vhci->msft_opcode);

	if (vhci->aosp_capable)
		hci_set_aosp_capable(hdev);

	return 0;
}

static void vhci_coredump(struct hci_dev *hdev)
{
	/* No need to do anything */
}

static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
	const char *buf;

	buf = "Controller Name: vhci_ctrl\n";
	skb_put_data(skb, buf, strlen(buf));

	buf = "Firmware Version: vhci_fw\n";
	skb_put_data(skb, buf, strlen(buf));

	buf = "Driver: vhci_drv\n";
	skb_put_data(skb, buf, strlen(buf));

	buf = "Vendor: vhci\n";
	skb_put_data(skb, buf, strlen(buf));
}

#define MAX_COREDUMP_LINE_LEN	40

struct devcoredump_test_data {
	enum devcoredump_state state;
	unsigned int timeout;
	char data[MAX_COREDUMP_LINE_LEN];
};

static inline void force_devcd_timeout(struct hci_dev *hdev,
				       unsigned int timeout)
{
#ifdef CONFIG_DEV_COREDUMP
	hdev->dump.timeout = secs_to_jiffies(timeout);
#endif
}

static ssize_t force_devcd_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct vhci_data *data = file->private_data;
	struct hci_dev *hdev = data->hdev;
	struct sk_buff *skb = NULL;
	struct devcoredump_test_data dump_data;
	size_t data_size;
	int ret;

	if (count < offsetof(struct devcoredump_test_data, data) ||
	    count > sizeof(dump_data))
		return -EINVAL;

	if (copy_from_user(&dump_data, user_buf, count))
		return -EFAULT;

	data_size = count - offsetof(struct devcoredump_test_data, data);
	skb = alloc_skb(data_size, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_put_data(skb, &dump_data.data, data_size);

	hci_devcd_register(hdev, vhci_coredump, vhci_coredump_hdr, NULL);

	/* Force the devcoredump timeout */
	if (dump_data.timeout)
		force_devcd_timeout(hdev, dump_data.timeout);

	ret = hci_devcd_init(hdev, skb->len);
	if (ret) {
		BT_ERR("Failed to generate devcoredump");
		kfree_skb(skb);
		return ret;
	}

	hci_devcd_append(hdev, skb);

	switch (dump_data.state) {
	case HCI_DEVCOREDUMP_DONE:
		hci_devcd_complete(hdev);
		break;
	case HCI_DEVCOREDUMP_ABORT:
		hci_devcd_abort(hdev);
		break;
	case HCI_DEVCOREDUMP_TIMEOUT:
		/* Do nothing */
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations force_devcoredump_fops = {
	.open		= simple_open,
	.write		= force_devcd_write,
};

static void vhci_debugfs_init(struct vhci_data *data)
{
	struct hci_dev *hdev = data->hdev;

	debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
			    &force_suspend_fops);

	debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
			    &force_wakeup_fops);

	if (IS_ENABLED(CONFIG_BT_MSFTEXT))
		debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
				    &msft_opcode_fops);

	if (IS_ENABLED(CONFIG_BT_AOSPEXT))
		debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
				    &aosp_capable_fops);

	debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
			    &force_devcoredump_fops);
}

static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
	struct hci_dev *hdev;
	struct sk_buff *skb;

	if (data->hdev)
		return -EBADFD;

	/* bits 2-5 are reserved (must be zero) */
	if (opcode & 0x3c)
		return -EINVAL;

	skb = bt_skb_alloc(4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdev = hci_alloc_dev();
	if (!hdev) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	data->hdev = hdev;

	hdev->bus = HCI_VIRTUAL;
	hci_set_drvdata(hdev, data);

	hdev->open  = vhci_open_dev;
	hdev->close = vhci_close_dev;
	hdev->flush = vhci_flush;
	hdev->send  = vhci_send_frame;
	hdev->get_data_path_id = vhci_get_data_path_id;
	hdev->get_codec_config_data = vhci_get_codec_config_data;
	hdev->wakeup = vhci_wakeup;
	hdev->setup = vhci_setup;
	hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
	hci_set_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED);

	/* bit 6 is for external configuration */
	if (opcode & 0x40)
		hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);

	/* bit 7 is for raw device */
	if (opcode & 0x80)
		hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);

	if (hci_register_dev(hdev) < 0) {
		BT_ERR("Can't register HCI device");
		hci_free_dev(hdev);
		data->hdev = NULL;
		kfree_skb(skb);
		return -EBUSY;
	}

	if (!IS_ERR_OR_NULL(hdev->debugfs))
		vhci_debugfs_init(data);

	hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;

	skb_put_u8(skb, 0xff);
	skb_put_u8(skb, opcode);
	put_unaligned_le16(hdev->id, skb_put(skb, 2));
	skb_queue_head(&data->readq, skb);
	atomic_inc(&data->initialized);

	wake_up_interruptible(&data->read_wait);
	return 0;
}

static int vhci_create_device(struct vhci_data *data, __u8 opcode)
{
	int err;

	mutex_lock(&data->open_mutex);
	err = __vhci_create_device(data, opcode);
	mutex_unlock(&data->open_mutex);

	return err;
}

static inline ssize_t vhci_get_user(struct vhci_data *data,
				    struct iov_iter *from)
{
	size_t len = iov_iter_count(from);
	struct sk_buff *skb;
	__u8 pkt_type, opcode;
	int ret;

	if (len < 2 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!copy_from_iter_full(skb_put(skb, len), len, from)) {
		kfree_skb(skb);
		return -EFAULT;
	}

	pkt_type = *((__u8 *) skb->data);
	skb_pull(skb, 1);

	switch (pkt_type) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		if (!data->hdev) {
			kfree_skb(skb);
			return -ENODEV;
		}

		hci_skb_pkt_type(skb) = pkt_type;
		ret = hci_recv_frame(data->hdev, skb);
		break;

	case HCI_VENDOR_PKT:
		cancel_delayed_work_sync(&data->open_timeout);

		opcode = *((__u8 *) skb->data);
		skb_pull(skb, 1);

		if (skb->len > 0) {
			kfree_skb(skb);
			return -EINVAL;
		}

		kfree_skb(skb);

		ret = vhci_create_device(data, opcode);
		break;

	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	return (ret < 0) ? ret : len;
}

static inline ssize_t vhci_put_user(struct vhci_data *data,
				    struct sk_buff *skb,
				    char __user *buf, int count)
{
	char __user *ptr = buf;
	int len;

	len = min_t(unsigned int, skb->len, count);

	if (copy_to_user(ptr, skb->data, len))
		return -EFAULT;

	if (!data->hdev)
		return len;

	data->hdev->stat.byte_tx += len;

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		data->hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		data->hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		data->hdev->stat.sco_tx++;
		break;
	}

	return len;
}

static ssize_t vhci_read(struct file *file,
			 char __user *buf, size_t count, loff_t *pos)
{
	struct vhci_data *data = file->private_data;
	struct sk_buff *skb;
	ssize_t ret = 0;

	while (count) {
		skb = skb_dequeue(&data->readq);
		if (skb) {
			ret = vhci_put_user(data, skb, buf, count);
			if (ret < 0)
				skb_queue_head(&data->readq, skb);
			else
				kfree_skb(skb);
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		ret = wait_event_interruptible(data->read_wait,
					       !skb_queue_empty(&data->readq));
		if (ret < 0)
			break;
	}

	return ret;
}

static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhci_data *data = file->private_data;

	return vhci_get_user(data, from);
}

static __poll_t vhci_poll(struct file *file, poll_table *wait)
{
	struct vhci_data *data = file->private_data;

	poll_wait(file, &data->read_wait, wait);

	if (!skb_queue_empty(&data->readq))
		return EPOLLIN | EPOLLRDNORM;

	return EPOLLOUT | EPOLLWRNORM;
}

static void vhci_open_timeout(struct work_struct *work)
{
	struct vhci_data *data = container_of(work, struct vhci_data,
					      open_timeout.work);

	vhci_create_device(data, 0x00);
}

static int vhci_open(struct inode *inode, struct file *file)
{
	struct vhci_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	skb_queue_head_init(&data->readq);
	init_waitqueue_head(&data->read_wait);

	mutex_init(&data->open_mutex);
	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
	INIT_WORK(&data->suspend_work, vhci_suspend_work);

	file->private_data = data;
	nonseekable_open(inode, file);

	schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1));

	return 0;
}

static void vhci_debugfs_remove(struct hci_dev *hdev)
{
	debugfs_lookup_and_remove("force_suspend", hdev->debugfs);

	debugfs_lookup_and_remove("force_wakeup", hdev->debugfs);

	if (IS_ENABLED(CONFIG_BT_MSFTEXT))
		debugfs_lookup_and_remove("msft_opcode", hdev->debugfs);

	if (IS_ENABLED(CONFIG_BT_AOSPEXT))
		debugfs_lookup_and_remove("aosp_capable", hdev->debugfs);

	debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs);
}

static int vhci_release(struct inode *inode, struct file *file)
{
	struct vhci_data *data = file->private_data;
	struct hci_dev *hdev;

	cancel_delayed_work_sync(&data->open_timeout);
	flush_work(&data->suspend_work);

	hdev = data->hdev;

	if (hdev) {
		if (!IS_ERR_OR_NULL(hdev->debugfs))
			vhci_debugfs_remove(hdev);

		hci_unregister_dev(hdev);
		hci_free_dev(hdev);
	}

	skb_queue_purge(&data->readq);
	file->private_data = NULL;
	kfree(data);

	return 0;
}

static const struct file_operations vhci_fops = {
	.owner		= THIS_MODULE,
	.read		= vhci_read,
	.write_iter	= vhci_write,
	.poll		= vhci_poll,
	.open		= vhci_open,
	.release	= vhci_release,
};

static struct miscdevice vhci_miscdev = {
	.name	= "vhci",
	.fops	= &vhci_fops,
	.minor	= VHCI_MINOR,
};
module_misc_device(vhci_miscdev);

module_param(amp, bool, 0644);
MODULE_PARM_DESC(amp, "Create AMP controller device");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("devname:vhci");
MODULE_ALIAS_MISCDEV(VHCI_MINOR);
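The read/write protocol above is easiest to see from userspace. The following is a minimal sketch (not part of the driver) that opens /dev/vhci, writes a vendor packet (type 0xff plus a one-byte opcode) to create the controller before the one-second open_timeout does it with opcode 0x00, and reads back the 4-byte confirmation that carries the echoed opcode and the assigned HCI index. Error handling is abbreviated.

/* Userspace sketch: create a virtual controller via /dev/vhci. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* 0xff is the HCI vendor packet type; opcode 0x00 asks for a
	 * plain controller (bit 6 = external config, bit 7 = raw).
	 */
	uint8_t req[2] = { 0xff, 0x00 };
	uint8_t rsp[4];
	int fd = open("/dev/vhci", O_RDWR);

	if (fd < 0 || write(fd, req, sizeof(req)) != sizeof(req))
		return 1;
	if (read(fd, rsp, sizeof(rsp)) != sizeof(rsp))
		return 1;
	/* rsp[1] echoes the opcode; rsp[2..3] is the hci index (LE). */
	printf("created hci%u\n", rsp[2] | (rsp[3] << 8));
	/* Closing the fd unregisters and frees the controller again. */
	close(fd);
	return 0;
}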
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);

	return real_protocol && skb->protocol == real_protocol;
}

static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;

	skb_scrub_packet(skb, true);
	memset(&skb->headers, 0, sizeof(skb->headers));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu;

	while (unlikely(cpu >= nr_cpu_ids || !cpu_online(cpu)))
		cpu = *stored_cpu = cpumask_nth(id % num_online_cpus(),
						cpu_online_mask);
	return cpu;
}

/* This function is racy, in the sense that it's called while last_cpu is
 * unlocked, so it could return the same CPU twice. Adding locking or using
 * atomic sequence numbers is slower though, and the consequences of racing are
 * harmless, so live with it.
 */
static inline int wg_cpumask_next_online(int *last_cpu)
{
	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	WRITE_ONCE(*last_cpu, cpu);
	return cpu;
}

void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}

static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(&device_queue->last_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}

static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb,
						enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
					       peer->internal_id),
		      peer->device->packet_crypt_wq,
		      &peer->transmit_packet_work);
	wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb,
						enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */
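The comment on wg_cpumask_next_online is the subtle part of this header. The following standalone model (an illustration, not kernel code; it assumes a fixed CPU count with every CPU online) shows the same round-robin selection, and makes the benign race visible: two concurrent callers can both read the same last_cpu and pick the same CPU, which merely skews the spread of work rather than breaking anything.

/* Userspace model of the unlocked round-robin in wg_cpumask_next_online. */
#include <stdio.h>

#define NR_CPUS 4

static int last_cpu = -1;

static int next_online(void)
{
	int cpu = last_cpu + 1;	/* cpumask_next(): first CPU after last_cpu */

	if (cpu >= NR_CPUS)	/* past the end: wrap to cpumask_first() */
		cpu = 0;
	last_cpu = cpu;		/* plain store, deliberately unlocked */
	return cpu;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("packet %d -> cpu %d\n", i, next_online());
	return 0;
}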
// SPDX-License-Identifier: GPL-2.0
/*
 * Some of this code is credited to Linux USB open source files that are
 * distributed with Linux.
 *
 * Copyright:	2007 Metrologic Instruments. All rights reserved.
 * Copyright:	2011 Azimut Ltd. <http://azimutrzn.ru/>
 */

#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb/serial.h>

#define DRIVER_DESC "Metrologic Instruments Inc. - USB-POS driver"

/* Product information. */
#define FOCUS_VENDOR_ID			0x0C2E
#define FOCUS_PRODUCT_ID_BI		0x0720
#define FOCUS_PRODUCT_ID_UNI		0x0700

#define METROUSB_SET_REQUEST_TYPE	0x40
#define METROUSB_SET_MODEM_CTRL_REQUEST	10
#define METROUSB_SET_BREAK_REQUEST	0x40
#define METROUSB_MCR_NONE		0x08	/* Deactivate DTR and RTS. */
#define METROUSB_MCR_RTS		0x0a	/* Activate RTS. */
#define METROUSB_MCR_DTR		0x09	/* Activate DTR. */
#define WDR_TIMEOUT			5000	/* default urb timeout. */

/* Private data structure. */
struct metrousb_private {
	spinlock_t lock;
	int throttled;
	unsigned long control_state;
};

/* Device table list. */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
	{ USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) },	/* MS7820 */
	{ }, /* Terminating entry. */
};
MODULE_DEVICE_TABLE(usb, id_table);

/* UNI-Directional mode commands for device configure */
#define UNI_CMD_OPEN	0x80
#define UNI_CMD_CLOSE	0xFF

static int metrousb_is_unidirectional_mode(struct usb_serial *serial)
{
	u16 product_id = le16_to_cpu(serial->dev->descriptor.idProduct);

	return product_id == FOCUS_PRODUCT_ID_UNI;
}

static int metrousb_calc_num_ports(struct usb_serial *serial,
				   struct usb_serial_endpoints *epds)
{
	if (metrousb_is_unidirectional_mode(serial)) {
		if (epds->num_interrupt_out == 0) {
			dev_err(&serial->interface->dev,
				"interrupt-out endpoint missing\n");
			return -ENODEV;
		}
	}

	return 1;
}

static int metrousb_send_unidirectional_cmd(u8 cmd, struct usb_serial_port *port)
{
	int ret;
	int actual_len;
	u8 *buffer_cmd = NULL;

	if (!metrousb_is_unidirectional_mode(port->serial))
		return 0;

	buffer_cmd = kzalloc(sizeof(cmd), GFP_KERNEL);
	if (!buffer_cmd)
		return -ENOMEM;

	*buffer_cmd = cmd;

	ret = usb_interrupt_msg(port->serial->dev,
		usb_sndintpipe(port->serial->dev,
			       port->interrupt_out_endpointAddress),
		buffer_cmd, sizeof(cmd),
		&actual_len, USB_CTRL_SET_TIMEOUT);

	kfree(buffer_cmd);

	if (ret < 0)
		return ret;
	else if (actual_len != sizeof(cmd))
		return -EIO;
	return 0;
}

static void metrousb_read_int_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
	unsigned char *data = urb->transfer_buffer;
	unsigned long flags;
	int throttled = 0;
	int result = 0;

	dev_dbg(&port->dev, "%s\n", __func__);

	switch (urb->status) {
	case 0:
		/* Success status, read from the port. */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* urb has been terminated. */
		dev_dbg(&port->dev,
			"%s - urb shutting down, error code=%d\n",
			__func__, urb->status);
		return;
	default:
		dev_dbg(&port->dev,
			"%s - non-zero urb received, error code=%d\n",
			__func__, urb->status);
		goto exit;
	}

	/* Set the data read from the usb port into the serial port buffer. */
	if (urb->actual_length) {
		/* Loop through the data copying each byte to the tty layer. */
		tty_insert_flip_string(&port->port, data, urb->actual_length);

		/* Force the data to the tty layer. */
		tty_flip_buffer_push(&port->port);
	}

	/* Set any port variables. */
	spin_lock_irqsave(&metro_priv->lock, flags);
	throttled = metro_priv->throttled;
	spin_unlock_irqrestore(&metro_priv->lock, flags);

	if (throttled)
		return;
exit:
	/* Try to resubmit the urb. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result)
		dev_err(&port->dev,
			"%s - failed submitting interrupt in urb, error code=%d\n",
			__func__, result);
}

static void metrousb_cleanup(struct usb_serial_port *port)
{
	usb_kill_urb(port->interrupt_in_urb);

	metrousb_send_unidirectional_cmd(UNI_CMD_CLOSE, port);
}

static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct usb_serial *serial = port->serial;
	struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
	unsigned long flags;
	int result = 0;

	/* Set the private data information for the port. */
	spin_lock_irqsave(&metro_priv->lock, flags);
	metro_priv->control_state = 0;
	metro_priv->throttled = 0;
	spin_unlock_irqrestore(&metro_priv->lock, flags);

	/* Clear the urb pipe. */
	usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);

	/* Start reading from the device */
	usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
			 usb_rcvintpipe(serial->dev,
					port->interrupt_in_endpointAddress),
			 port->interrupt_in_urb->transfer_buffer,
			 port->interrupt_in_urb->transfer_buffer_length,
			 metrousb_read_int_callback, port, 1);
	result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);

	if (result) {
		dev_err(&port->dev,
			"%s - failed submitting interrupt in urb, error code=%d\n",
			__func__, result);
		return result;
	}

	/* Send activate cmd to device */
	result = metrousb_send_unidirectional_cmd(UNI_CMD_OPEN, port);
	if (result) {
		dev_err(&port->dev,
			"%s - failed to configure device, error code=%d\n",
			__func__, result);
		goto err_kill_urb;
	}

	return 0;

err_kill_urb:
	usb_kill_urb(port->interrupt_in_urb);

	return result;
}

static int metrousb_set_modem_ctrl(struct usb_serial *serial,
				   unsigned int control_state)
{
	int retval = 0;
	unsigned char mcr = METROUSB_MCR_NONE;

	dev_dbg(&serial->dev->dev, "%s - control state = %d\n",
		__func__, control_state);

	/* Set the modem control value. */
	if (control_state & TIOCM_DTR)
		mcr |= METROUSB_MCR_DTR;
	if (control_state & TIOCM_RTS)
		mcr |= METROUSB_MCR_RTS;

	/* Send the command to the usb port. */
	retval = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
				 METROUSB_SET_REQUEST_TYPE,
				 METROUSB_SET_MODEM_CTRL_REQUEST,
				 control_state, 0, NULL, 0, WDR_TIMEOUT);
	if (retval < 0)
		dev_err(&serial->dev->dev,
			"%s - set modem ctrl=0x%x failed, error code=%d\n",
			__func__, mcr, retval);

	return retval;
}

static int metrousb_port_probe(struct usb_serial_port *port)
{
	struct metrousb_private *metro_priv;

	metro_priv = kzalloc(sizeof(*metro_priv), GFP_KERNEL);
	if (!metro_priv)
		return -ENOMEM;

	spin_lock_init(&metro_priv->lock);

	usb_set_serial_port_data(port, metro_priv);

	return 0;
}

static void metrousb_port_remove(struct usb_serial_port *port)
{
	struct metrousb_private *metro_priv;

	metro_priv = usb_get_serial_port_data(port);
	kfree(metro_priv);
}

static void metrousb_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
	unsigned long flags;

	/* Set the private information for the port to stop reading data. */
	spin_lock_irqsave(&metro_priv->lock, flags);
	metro_priv->throttled = 1;
	spin_unlock_irqrestore(&metro_priv->lock, flags);
}

static int metrousb_tiocmget(struct tty_struct *tty)
{
	unsigned long control_state = 0;
	struct usb_serial_port *port = tty->driver_data;
	struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
	unsigned long flags;

	spin_lock_irqsave(&metro_priv->lock, flags);
	control_state = metro_priv->control_state;
	spin_unlock_irqrestore(&metro_priv->lock, flags);

	return control_state;
}

static int metrousb_tiocmset(struct tty_struct *tty,
			     unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct usb_serial *serial = port->serial;
	struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
	unsigned long flags;
	unsigned long control_state = 0;

	dev_dbg(&port->dev, "%s - set=%d, clear=%d\n", __func__, set, clear);

	spin_lock_irqsave(&metro_priv->lock, flags);
	control_state = metro_priv->control_state;

	/* Set the RTS and DTR values. */
	if (set & TIOCM_RTS)
		control_state |= TIOCM_RTS;
	if (set & TIOCM_DTR)
		control_state |= TIOCM_DTR;
	if (clear & TIOCM_RTS)
		control_state &= ~TIOCM_RTS;
	if (clear & TIOCM_DTR)
		control_state &= ~TIOCM_DTR;

	metro_priv->control_state = control_state;
	spin_unlock_irqrestore(&metro_priv->lock, flags);
	return metrousb_set_modem_ctrl(serial, control_state);
}

static void metrousb_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct metrousb_private *metro_priv = usb_get_serial_port_data(port);
	unsigned long flags;
	int result = 0;

	/* Set the private information for the port to resume reading data. */
	spin_lock_irqsave(&metro_priv->lock, flags);
	metro_priv->throttled = 0;
	spin_unlock_irqrestore(&metro_priv->lock, flags);

	/* Submit the urb to read from the port. */
	result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
	if (result)
		dev_err(&port->dev,
			"failed submitting interrupt in urb error code=%d\n",
			result);
}

static struct usb_serial_driver metrousb_device = {
	.driver = {
		.name		= "metro-usb",
	},
	.description		= "Metrologic USB to Serial",
	.id_table		= id_table,
	.num_interrupt_in	= 1,
	.calc_num_ports		= metrousb_calc_num_ports,
	.open			= metrousb_open,
	.close			= metrousb_cleanup,
	.read_int_callback	= metrousb_read_int_callback,
	.port_probe		= metrousb_port_probe,
	.port_remove		= metrousb_port_remove,
	.throttle		= metrousb_throttle,
	.unthrottle		= metrousb_unthrottle,
	.tiocmget		= metrousb_tiocmget,
	.tiocmset		= metrousb_tiocmset,
};

static struct usb_serial_driver * const serial_drivers[] = {
	&metrousb_device,
	NULL,
};

module_usb_serial_driver(serial_drivers, id_table);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Philip Nicastro");
MODULE_AUTHOR("Aleksey Babahin <tamerlan311@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
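For completeness, here is a userspace sketch of how the tiocmget/tiocmset paths above are reached through the standard termios modem-control ioctls. The node name /dev/ttyUSB0 is an assumption for a port bound to this driver; error handling is abbreviated.

/* Userspace sketch: toggle and read back DTR/RTS on a bound port. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int status, fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	status = TIOCM_DTR | TIOCM_RTS;
	ioctl(fd, TIOCMBIS, &status);	/* set DTR+RTS -> metrousb_tiocmset */
	ioctl(fd, TIOCMGET, &status);	/* read back   -> metrousb_tiocmget */
	printf("DTR=%d RTS=%d\n",
	       !!(status & TIOCM_DTR), !!(status & TIOCM_RTS));
	close(fd);
	return 0;
}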
// SPDX-License-Identifier: GPL-2.0-or-later
/* Instantiate a public key crypto key from an X.509 Certificate
 *
 * Copyright (C) 2012, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "ASYM: "fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <crypto/public_key.h>
#include "asymmetric_keys.h"

static bool use_builtin_keys;
static struct asymmetric_key_id *ca_keyid;

#ifndef MODULE
static struct {
	/* Must be last as it ends in a flexible-array member. */
	TRAILING_OVERLAP(struct asymmetric_key_id, id, data,
		unsigned char data[10];
	);
} cakey;
static_assert(offsetof(typeof(cakey), id.data) == offsetof(typeof(cakey), data));

static int __init ca_keys_setup(char *str)
{
	if (!str)		/* default system keyring */
		return 1;

	if (strncmp(str, "id:", 3) == 0) {
		struct asymmetric_key_id *p = &cakey.id;
		size_t hexlen = (strlen(str) - 3) / 2;
		int ret;

		if (hexlen == 0 || hexlen > sizeof(cakey.data)) {
			pr_err("Missing or invalid ca_keys id\n");
			return 1;
		}

		ret = __asymmetric_key_hex_to_key_id(str + 3, p, hexlen);
		if (ret < 0)
			pr_err("Unparsable ca_keys id hex string\n");
		else
			ca_keyid = p;	/* owner key 'id:xxxxxx' */
	} else if (strcmp(str, "builtin") == 0) {
		use_builtin_keys = true;
	}

	return 1;
}
__setup("ca_keys=", ca_keys_setup);
#endif

/**
 * restrict_link_by_signature - Restrict additions to a ring of public keys
 * @dest_keyring: Keyring being linked to.
 * @type: The type of key being added.
 * @payload: The payload of the new key.
 * @trust_keyring: A ring of keys that can be used to vouch for the new cert.
 *
 * Check the new certificate against the ones in the trust keyring.  If one of
 * those is the signing key and validates the new certificate, then mark the
 * new certificate as being trusted.
 *
 * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
 * matching parent certificate in the trusted list, -EKEYREJECTED if the
 * signature check fails or the key is blacklisted, -ENOPKG if the signature
 * uses unsupported crypto, or some other error if there is a matching
 * certificate but the signature check cannot be performed.
 */
int restrict_link_by_signature(struct key *dest_keyring,
			       const struct key_type *type,
			       const union key_payload *payload,
			       struct key *trust_keyring)
{
	const struct public_key_signature *sig;
	struct key *key;
	int ret;

	pr_devel("==>%s()\n", __func__);

	if (!trust_keyring)
		return -ENOKEY;

	if (type != &key_type_asymmetric)
		return -EOPNOTSUPP;

	sig = payload->data[asym_auth];
	if (!sig)
		return -ENOPKG;
	if (!sig->auth_ids[0] && !sig->auth_ids[1] && !sig->auth_ids[2])
		return -ENOKEY;

	if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
		return -EPERM;

	/* See if we have a key that signed this one. */
	key = find_asymmetric_key(trust_keyring,
				  sig->auth_ids[0], sig->auth_ids[1],
				  sig->auth_ids[2], false);
	if (IS_ERR(key))
		return -ENOKEY;

	if (use_builtin_keys && !test_bit(KEY_FLAG_BUILTIN, &key->flags))
		ret = -ENOKEY;
	else if (IS_BUILTIN(CONFIG_SECONDARY_TRUSTED_KEYRING_SIGNED_BY_BUILTIN) &&
		 !strcmp(dest_keyring->description, ".secondary_trusted_keys") &&
		 !test_bit(KEY_FLAG_BUILTIN, &key->flags))
		ret = -ENOKEY;
	else
		ret = verify_signature(key, sig);
	key_put(key);
	return ret;
}

/**
 * restrict_link_by_ca - Restrict additions to a ring of CA keys
 * @dest_keyring: Keyring being linked to.
 * @type: The type of key being added.
 * @payload: The payload of the new key.
 * @trust_keyring: Unused.
 *
 * Check if the new certificate is a CA. If it is a CA, then mark the new
 * certificate as being ok to link.
 *
 * Returns 0 if the new certificate was accepted, -ENOKEY if the
 * certificate is not a CA. -ENOPKG if the signature uses unsupported
 * crypto, or some other error if there is a matching certificate but
 * the signature check cannot be performed.
 */
int restrict_link_by_ca(struct key *dest_keyring,
			const struct key_type *type,
			const union key_payload *payload,
			struct key *trust_keyring)
{
	const struct public_key *pkey;

	if (type != &key_type_asymmetric)
		return -EOPNOTSUPP;

	pkey = payload->data[asym_crypto];
	if (!pkey)
		return -ENOPKG;
	if (!test_bit(KEY_EFLAG_CA, &pkey->key_eflags))
		return -ENOKEY;
	if (!test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
		return -ENOKEY;
	if (!IS_ENABLED(CONFIG_INTEGRITY_CA_MACHINE_KEYRING_MAX))
		return 0;
	if (test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags))
		return -ENOKEY;

	return 0;
}

/**
 * restrict_link_by_digsig - Restrict additions to a ring of digsig keys
 * @dest_keyring: Keyring being linked to.
 * @type: The type of key being added.
 * @payload: The payload of the new key.
 * @trust_keyring: A ring of keys that can be used to vouch for the new cert.
 *
 * Check if the new certificate has digitalSignature usage set. If it does,
 * then mark the new certificate as being ok to link. Afterwards verify
 * the new certificate against the ones in the trust_keyring.
 *
 * Returns 0 if the new certificate was accepted, -ENOKEY if the
 * certificate is not a digsig. -ENOPKG if the signature uses unsupported
 * crypto, or some other error if there is a matching certificate but
 * the signature check cannot be performed.
 */
int restrict_link_by_digsig(struct key *dest_keyring,
			    const struct key_type *type,
			    const union key_payload *payload,
			    struct key *trust_keyring)
{
	const struct public_key *pkey;

	if (type != &key_type_asymmetric)
		return -EOPNOTSUPP;

	pkey = payload->data[asym_crypto];

	if (!pkey)
		return -ENOPKG;

	if (!test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags))
		return -ENOKEY;

	if (test_bit(KEY_EFLAG_CA, &pkey->key_eflags))
		return -ENOKEY;

	if (test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
		return -ENOKEY;

	return restrict_link_by_signature(dest_keyring, type, payload,
					  trust_keyring);
}

static bool match_either_id(const struct asymmetric_key_id **pair,
			    const struct asymmetric_key_id *single)
{
	return (asymmetric_key_id_same(pair[0], single) ||
		asymmetric_key_id_same(pair[1], single));
}

static int key_or_keyring_common(struct key *dest_keyring,
				 const struct key_type *type,
				 const union key_payload *payload,
				 struct key *trusted, bool check_dest)
{
	const struct public_key_signature *sig;
	struct key *key = NULL;
	int ret;

	pr_devel("==>%s()\n", __func__);

	if (!dest_keyring)
		return -ENOKEY;
	else if (dest_keyring->type != &key_type_keyring)
		return -EOPNOTSUPP;

	if (!trusted && !check_dest)
		return -ENOKEY;

	if (type != &key_type_asymmetric)
		return -EOPNOTSUPP;

	sig = payload->data[asym_auth];
	if (!sig)
		return -ENOPKG;
	if (!sig->auth_ids[0] && !sig->auth_ids[1] && !sig->auth_ids[2])
		return -ENOKEY;

	if (trusted) {
		if (trusted->type == &key_type_keyring) {
			/* See if we have a key that signed this one. */
			key = find_asymmetric_key(trusted, sig->auth_ids[0],
						  sig->auth_ids[1],
						  sig->auth_ids[2], false);
			if (IS_ERR(key))
				key = NULL;
		} else if (trusted->type == &key_type_asymmetric) {
			const struct asymmetric_key_id **signer_ids;

			signer_ids = (const struct asymmetric_key_id **)
				asymmetric_key_ids(trusted)->id;

			/*
			 * The auth_ids come from the candidate key (the
			 * one that is being considered for addition to
			 * dest_keyring) and identify the key that was
			 * used to sign.
			 *
			 * The signer_ids are identifiers for the
			 * signing key specified for dest_keyring.
			 *
			 * The first auth_id is the preferred id, 2nd and
			 * 3rd are the fallbacks.  If exactly one of
			 * auth_ids[0] and auth_ids[1] is present, it may
			 * match either signer_ids[0] or signer_ids[1].
			 * If both are present the first one may match
			 * either signer_id but the second one must match
			 * the second signer_id.  If neither of them is
			 * available, auth_ids[2] is matched against
			 * signer_ids[2] as a fallback.
			 */
			if (!sig->auth_ids[0] && !sig->auth_ids[1]) {
				if (asymmetric_key_id_same(signer_ids[2],
							   sig->auth_ids[2]))
					key = __key_get(trusted);

			} else if (!sig->auth_ids[0] || !sig->auth_ids[1]) {
				const struct asymmetric_key_id *auth_id;

				auth_id = sig->auth_ids[0] ?: sig->auth_ids[1];
				if (match_either_id(signer_ids, auth_id))
					key = __key_get(trusted);

			} else if (asymmetric_key_id_same(signer_ids[1],
							  sig->auth_ids[1]) &&
				   match_either_id(signer_ids,
						   sig->auth_ids[0])) {
				key = __key_get(trusted);
			}
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (check_dest && !key) {
		/* See if the destination has a key that signed this one. */
		key = find_asymmetric_key(dest_keyring, sig->auth_ids[0],
					  sig->auth_ids[1], sig->auth_ids[2],
					  false);
		if (IS_ERR(key))
			key = NULL;
	}

	if (!key)
		return -ENOKEY;

	ret = key_validate(key);
	if (ret == 0)
		ret = verify_signature(key, sig);

	key_put(key);
	return ret;
}

/**
 * restrict_link_by_key_or_keyring - Restrict additions to a ring of public
 * keys using the restrict_key information stored in the ring.
 * @dest_keyring: Keyring being linked to.
 * @type: The type of key being added.
 * @payload: The payload of the new key.
 * @trusted: A key or ring of keys that can be used to vouch for the new cert.
 *
 * Check the new certificate only against the key or keys passed in the data
 * parameter. If one of those is the signing key and validates the new
 * certificate, then mark the new certificate as being ok to link.
 *
 * Returns 0 if the new certificate was accepted, -ENOKEY if we
 * couldn't find a matching parent certificate in the trusted list,
 * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
 * unsupported crypto, or some other error if there is a matching certificate
 * but the signature check cannot be performed.
 */
int restrict_link_by_key_or_keyring(struct key *dest_keyring,
				    const struct key_type *type,
				    const union key_payload *payload,
				    struct key *trusted)
{
	return key_or_keyring_common(dest_keyring, type, payload, trusted,
				     false);
}

/**
 * restrict_link_by_key_or_keyring_chain - Restrict additions to a ring of
 * public keys using the restrict_key information stored in the ring.
 * @dest_keyring: Keyring being linked to.
 * @type: The type of key being added.
 * @payload: The payload of the new key.
 * @trusted: A key or ring of keys that can be used to vouch for the new cert.
 *
 * Check the new certificate against the key or keys passed in the data
 * parameter and against the keys already linked to the destination keyring. If
 * one of those is the signing key and validates the new certificate, then mark
 * the new certificate as being ok to link.
 *
 * Returns 0 if the new certificate was accepted, -ENOKEY if we
 * couldn't find a matching parent certificate in the trusted list,
 * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
 * unsupported crypto, or some other error if there is a matching certificate
 * but the signature check cannot be performed.
 */
int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring,
					  const struct key_type *type,
					  const union key_payload *payload,
					  struct key *trusted)
{
	return key_or_keyring_common(dest_keyring, type, payload, trusted,
				     true);
}
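The matching rules in the long comment inside key_or_keyring_common are dense enough to deserve a worked model. The sketch below is an illustration only: key IDs are stubbed as plain integers (0 meaning "absent") and the helper names are invented for the example; it mirrors the three branches of the trusted-asymmetric-key case.

/* Standalone model of the auth_id/signer_id matching rules. */
#include <stdbool.h>
#include <stdio.h>

static bool same(int a, int b)
{
	return a && b && a == b;	/* asymmetric_key_id_same() stub */
}

static bool match_either(const int signer[3], int single)
{
	return same(signer[0], single) || same(signer[1], single);
}

static bool trusted_key_matches(const int signer[3], const int auth[3])
{
	if (!auth[0] && !auth[1])	/* only the 3rd, fallback id */
		return same(signer[2], auth[2]);
	if (!auth[0] || !auth[1])	/* exactly one of the first two */
		return match_either(signer, auth[0] ? auth[0] : auth[1]);
	/* both present: auth[1] must match signer[1], auth[0] either */
	return same(signer[1], auth[1]) && match_either(signer, auth[0]);
}

int main(void)
{
	const int signer[3] = { 10, 20, 30 };
	const int one_id[3] = { 20, 0, 0 };	/* matches signer[1] */
	const int both[3]   = { 10, 99, 0 };	/* auth[1] mismatch */

	printf("%d %d\n", trusted_key_matches(signer, one_id),
	       trusted_key_matches(signer, both));	/* prints: 1 0 */
	return 0;
}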
895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 /* * linux/drivers/video/modedb.c -- Standard video mode database management * * Copyright (C) 1999 Geert Uytterhoeven * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/export.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/kernel.h> #undef DEBUG #define name_matches(v, s, l) \ ((v).name && !strncmp((s), (v).name, (l)) && strlen((v).name) == (l)) #define res_matches(v, x, y) \ ((v).xres == (x) && (v).yres == (y)) #ifdef DEBUG #define DPRINTK(fmt, args...) printk("modedb %s: " fmt, __func__ , ## args) #else #define DPRINTK(fmt, args...) 
#endif /* * Standard video mode definitions (taken from XFree86) */ static const struct fb_videomode modedb[] = { /* 640x400 @ 70 Hz, 31.5 kHz hsync */ { NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, 0, FB_VMODE_NONINTERLACED }, /* 640x480 @ 60 Hz, 31.5 kHz hsync */ { NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, 0, FB_VMODE_NONINTERLACED }, /* 800x600 @ 56 Hz, 35.15 kHz hsync */ { NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, 0, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */ { NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, 0, FB_VMODE_INTERLACED }, /* 640x400 @ 85 Hz, 37.86 kHz hsync */ { NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 640x480 @ 72 Hz, 36.5 kHz hsync */ { NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, 0, FB_VMODE_NONINTERLACED }, /* 640x480 @ 75 Hz, 37.50 kHz hsync */ { NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, 0, FB_VMODE_NONINTERLACED }, /* 800x600 @ 60 Hz, 37.8 kHz hsync */ { NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 640x480 @ 85 Hz, 43.27 kHz hsync */ { NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */ { NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10, 0, FB_VMODE_INTERLACED }, /* 800x600 @ 72 Hz, 48.0 kHz hsync */ { NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 60 Hz, 48.4 kHz hsync */ { NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, 0, FB_VMODE_NONINTERLACED }, /* 640x480 @ 100 Hz, 53.01 kHz hsync */ { NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 60 Hz, 53.5 kHz hsync */ { NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, 0, FB_VMODE_NONINTERLACED }, /* 800x600 @ 85 Hz, 55.84 kHz hsync */ { NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, 0, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 70 Hz, 56.5 kHz hsync */ { NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */ { NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, 0, FB_VMODE_INTERLACED }, /* 800x600 @ 100 Hz, 64.02 kHz hsync */ { NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, 0, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 76 Hz, 62.5 kHz hsync */ { NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 70 Hz, 62.4 kHz hsync */ { NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */ { NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3, 0, FB_VMODE_NONINTERLACED }, /* 1400x1050 @ 60Hz, 63.9 kHz hsync */ { NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, 0, FB_VMODE_NONINTERLACED }, /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/ { NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1400x1050 @ 60 Hz, ? 
kHz +hsync +vsync*/ { NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 85 Hz, 70.24 kHz hsync */ { NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 78 Hz, 70.8 kHz hsync */ { NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */ { NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, 0, FB_VMODE_NONINTERLACED }, /* 1600x1200 @ 60Hz, 75.00 kHz hsync */ { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 84 Hz, 76.0 kHz hsync */ { NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */ { NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, 0, FB_VMODE_NONINTERLACED }, /* 1024x768 @ 100Hz, 80.21 kHz hsync */ { NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */ { NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, 0, FB_VMODE_NONINTERLACED }, /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */ { NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, 0, FB_VMODE_NONINTERLACED }, /* 1152x864 @ 100 Hz, 89.62 kHz hsync */ { NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, 0, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */ { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */ { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */ { NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */ { NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */ { NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, 0, FB_VMODE_NONINTERLACED }, /* 1800x1440 @ 64Hz, 96.15 kHz hsync */ { NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1800x1440 @ 70Hz, 104.52 kHz hsync */ { NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 512x384 @ 78 Hz, 31.50 kHz hsync */ { NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, 0, FB_VMODE_NONINTERLACED }, /* 512x384 @ 85 Hz, 34.38 kHz hsync */ { NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, 0, FB_VMODE_NONINTERLACED }, /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */ { NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, 0, FB_VMODE_DOUBLE }, /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */ { NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, 0, FB_VMODE_DOUBLE }, /* 320x240 @ 72 Hz, 36.5 kHz hsync */ { NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, 0, FB_VMODE_DOUBLE }, /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */ { NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, 0, FB_VMODE_DOUBLE }, /* 400x300 @ 60 Hz, 37.8 kHz hsync */ { NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, 0, FB_VMODE_DOUBLE }, /* 400x300 @ 72 Hz, 48.0 kHz hsync */ { NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, 0, FB_VMODE_DOUBLE }, /* 480x300 @ 56 Hz, 35.2 kHz hsync, 
8:5 aspect ratio */ { NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, 0, FB_VMODE_DOUBLE }, /* 480x300 @ 60 Hz, 37.8 kHz hsync */ { NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, 0, FB_VMODE_DOUBLE }, /* 480x300 @ 63 Hz, 39.6 kHz hsync */ { NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, 0, FB_VMODE_DOUBLE }, /* 480x300 @ 72 Hz, 48.0 kHz hsync */ { NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0, FB_VMODE_DOUBLE }, /* 1920x1080 @ 60 Hz, 67.3 kHz hsync */ { NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1920x1200 @ 60 Hz, 74.5 kHz hsync */ { NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */ { NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }, /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */ { NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, 0, FB_VMODE_NONINTERLACED }, /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */ { NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, 0, FB_VMODE_NONINTERLACED }, /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ { NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5, 0, FB_VMODE_INTERLACED }, /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ { NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5, 0, FB_VMODE_INTERLACED }, /* 864x480 @ 60 Hz, 35.15 kHz hsync */ { NULL, 60, 864, 480, 27777, 1, 1, 1, 1, 0, 0, 0, FB_VMODE_NONINTERLACED }, }; #ifdef CONFIG_FB_MODE_HELPERS const struct fb_videomode vesa_modes[] = { /* 0 640x350-85 VESA */ { NULL, 85, 640, 350, 31746, 96, 32, 60, 32, 64, 3, FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA}, /* 1 640x400-85 VESA */ { NULL, 85, 640, 400, 31746, 96, 32, 41, 01, 64, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 2 720x400-85 VESA */ { NULL, 85, 721, 400, 28169, 108, 36, 42, 01, 72, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 3 640x480-60 VESA */ { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 4 640x480-72 VESA */ { NULL, 72, 640, 480, 31746, 128, 24, 29, 9, 40, 2, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 5 640x480-75 VESA */ { NULL, 75, 640, 480, 31746, 120, 16, 16, 01, 64, 3, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 6 640x480-85 VESA */ { NULL, 85, 640, 480, 27777, 80, 56, 25, 01, 56, 3, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 7 800x600-56 VESA */ { NULL, 56, 800, 600, 27777, 128, 24, 22, 01, 72, 2, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 8 800x600-60 VESA */ { NULL, 60, 800, 600, 25000, 88, 40, 23, 01, 128, 4, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 9 800x600-72 VESA */ { NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 10 800x600-75 VESA */ { NULL, 75, 800, 600, 20202, 160, 16, 21, 01, 80, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 11 800x600-85 VESA */ { NULL, 85, 800, 600, 17761, 152, 32, 27, 01, 64, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 12 1024x768i-43 VESA */ { NULL, 43, 1024, 768, 22271, 56, 8, 41, 0, 176, 8, FB_SYNC_HOR_HIGH_ACT |
FB_SYNC_VERT_HIGH_ACT, FB_VMODE_INTERLACED, FB_MODE_IS_VESA }, /* 13 1024x768-60 VESA */ { NULL, 60, 1024, 768, 15384, 160, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 14 1024x768-70 VESA */ { NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 15 1024x768-75 VESA */ { NULL, 75, 1024, 768, 12690, 176, 16, 28, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 16 1024x768-85 VESA */ { NULL, 85, 1024, 768, 10582, 208, 48, 36, 1, 96, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 17 1152x864-75 VESA */ { NULL, 75, 1152, 864, 9259, 256, 64, 32, 1, 128, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 18 1280x960-60 VESA */ { NULL, 60, 1280, 960, 9259, 312, 96, 36, 1, 112, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 19 1280x960-85 VESA */ { NULL, 85, 1280, 960, 6734, 224, 64, 47, 1, 160, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 20 1280x1024-60 VESA */ { NULL, 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 21 1280x1024-75 VESA */ { NULL, 75, 1280, 1024, 7407, 248, 16, 38, 1, 144, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 22 1280x1024-85 VESA */ { NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 23 1600x1200-60 VESA */ { NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 24 1600x1200-65 VESA */ { NULL, 65, 1600, 1200, 5698, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 25 1600x1200-70 VESA */ { NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 26 1600x1200-75 VESA */ { NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 27 1600x1200-85 VESA */ { NULL, 85, 1600, 1200, 4357, 304, 64, 46, 1, 192, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 28 1792x1344-60 VESA */ { NULL, 60, 1792, 1344, 4882, 328, 128, 46, 1, 200, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 29 1792x1344-75 VESA */ { NULL, 75, 1792, 1344, 3831, 352, 96, 69, 1, 216, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 30 1856x1392-60 VESA */ { NULL, 60, 1856, 1392, 4580, 352, 96, 43, 1, 224, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 31 1856x1392-75 VESA */ { NULL, 75, 1856, 1392, 3472, 352, 128, 104, 1, 224, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 32 1920x1440-60 VESA */ { NULL, 60, 1920, 1440, 4273, 344, 128, 56, 1, 200, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 33 1920x1440-75 VESA */ { NULL, 75, 1920, 1440, 3367, 352, 144, 56, 1, 224, 3, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 34 1920x1200-60 RB VESA */ { NULL, 60, 1920, 1200, 6493, 80, 48, 26, 3, 32, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 
35 1920x1200-60 VESA */ { NULL, 60, 1920, 1200, 5174, 336, 136, 36, 3, 200, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 36 1920x1200-75 VESA */ { NULL, 75, 1920, 1200, 4077, 344, 136, 46, 3, 208, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 37 1920x1200-85 VESA */ { NULL, 85, 1920, 1200, 3555, 352, 144, 53, 3, 208, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 38 2560x1600-60 RB VESA */ { NULL, 60, 2560, 1600, 3724, 80, 48, 37, 3, 32, 6, FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 39 2560x1600-60 VESA */ { NULL, 60, 2560, 1600, 2869, 472, 192, 49, 3, 280, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 40 2560x1600-75 VESA */ { NULL, 75, 2560, 1600, 2256, 488, 208, 63, 3, 280, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 41 2560x1600-85 VESA */ { NULL, 85, 2560, 1600, 1979, 488, 208, 73, 3, 280, 6, FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, /* 42 2560x1600-120 RB VESA */ { NULL, 120, 2560, 1600, 1809, 80, 48, 85, 3, 32, 6, FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, }; EXPORT_SYMBOL(vesa_modes); const struct dmt_videomode dmt_modes[DMT_SIZE] = { { 0x01, 0x0000, 0x000000, &vesa_modes[0] }, { 0x02, 0x3119, 0x000000, &vesa_modes[1] }, { 0x03, 0x0000, 0x000000, &vesa_modes[2] }, { 0x04, 0x3140, 0x000000, &vesa_modes[3] }, { 0x05, 0x314c, 0x000000, &vesa_modes[4] }, { 0x06, 0x314f, 0x000000, &vesa_modes[5] }, { 0x07, 0x3159, 0x000000, &vesa_modes[6] }, { 0x08, 0x0000, 0x000000, &vesa_modes[7] }, { 0x09, 0x4540, 0x000000, &vesa_modes[8] }, { 0x0a, 0x454c, 0x000000, &vesa_modes[9] }, { 0x0b, 0x454f, 0x000000, &vesa_modes[10] }, { 0x0c, 0x4559, 0x000000, &vesa_modes[11] }, { 0x0d, 0x0000, 0x000000, NULL }, { 0x0e, 0x0000, 0x000000, NULL }, { 0x0f, 0x0000, 0x000000, &vesa_modes[12] }, { 0x10, 0x6140, 0x000000, &vesa_modes[13] }, { 0x11, 0x614a, 0x000000, &vesa_modes[14] }, { 0x12, 0x614f, 0x000000, &vesa_modes[15] }, { 0x13, 0x6159, 0x000000, &vesa_modes[16] }, { 0x14, 0x0000, 0x000000, NULL }, { 0x15, 0x714f, 0x000000, &vesa_modes[17] }, { 0x16, 0x0000, 0x7f1c21, NULL }, { 0x17, 0x0000, 0x7f1c28, NULL }, { 0x18, 0x0000, 0x7f1c44, NULL }, { 0x19, 0x0000, 0x7f1c62, NULL }, { 0x1a, 0x0000, 0x000000, NULL }, { 0x1b, 0x0000, 0x8f1821, NULL }, { 0x1c, 0x8100, 0x8f1828, NULL }, { 0x1d, 0x810f, 0x8f1844, NULL }, { 0x1e, 0x8119, 0x8f1862, NULL }, { 0x1f, 0x0000, 0x000000, NULL }, { 0x20, 0x8140, 0x000000, &vesa_modes[18] }, { 0x21, 0x8159, 0x000000, &vesa_modes[19] }, { 0x22, 0x0000, 0x000000, NULL }, { 0x23, 0x8180, 0x000000, &vesa_modes[20] }, { 0x24, 0x818f, 0x000000, &vesa_modes[21] }, { 0x25, 0x8199, 0x000000, &vesa_modes[22] }, { 0x26, 0x0000, 0x000000, NULL }, { 0x27, 0x0000, 0x000000, NULL }, { 0x28, 0x0000, 0x000000, NULL }, { 0x29, 0x0000, 0x0c2021, NULL }, { 0x2a, 0x9040, 0x0c2028, NULL }, { 0x2b, 0x904f, 0x0c2044, NULL }, { 0x2c, 0x9059, 0x0c2062, NULL }, { 0x2d, 0x0000, 0x000000, NULL }, { 0x2e, 0x9500, 0xc11821, NULL }, { 0x2f, 0x9500, 0xc11828, NULL }, { 0x30, 0x950f, 0xc11844, NULL }, { 0x31, 0x9519, 0xc11868, NULL }, { 0x32, 0x0000, 0x000000, NULL }, { 0x33, 0xa940, 0x000000, &vesa_modes[23] }, { 0x34, 0xa945, 0x000000, &vesa_modes[24] }, { 0x35, 0xa94a, 0x000000, &vesa_modes[25] }, { 0x36, 0xa94f, 0x000000, &vesa_modes[26] }, { 0x37, 0xa959, 0x000000, &vesa_modes[27] }, { 0x38, 0x0000, 0x000000, NULL }, { 0x39, 0x0000, 0x0c2821, NULL }, { 0x3a, 0xb300, 0x0c2828, NULL }, { 0x3b, 0xb30f, 
0x0c2844, NULL }, { 0x3c, 0xb319, 0x0c2868, NULL }, { 0x3d, 0x0000, 0x000000, NULL }, { 0x3e, 0xc140, 0x000000, &vesa_modes[28] }, { 0x3f, 0xc14f, 0x000000, &vesa_modes[29] }, { 0x40, 0x0000, 0x000000, NULL}, { 0x41, 0xc940, 0x000000, &vesa_modes[30] }, { 0x42, 0xc94f, 0x000000, &vesa_modes[31] }, { 0x43, 0x0000, 0x000000, NULL }, { 0x44, 0x0000, 0x572821, &vesa_modes[34] }, { 0x45, 0xd100, 0x572828, &vesa_modes[35] }, { 0x46, 0xd10f, 0x572844, &vesa_modes[36] }, { 0x47, 0xd119, 0x572862, &vesa_modes[37] }, { 0x48, 0x0000, 0x000000, NULL }, { 0x49, 0xd140, 0x000000, &vesa_modes[32] }, { 0x4a, 0xd14f, 0x000000, &vesa_modes[33] }, { 0x4b, 0x0000, 0x000000, NULL }, { 0x4c, 0x0000, 0x1f3821, &vesa_modes[38] }, { 0x4d, 0x0000, 0x1f3828, &vesa_modes[39] }, { 0x4e, 0x0000, 0x1f3844, &vesa_modes[40] }, { 0x4f, 0x0000, 0x1f3862, &vesa_modes[41] }, { 0x50, 0x0000, 0x000000, &vesa_modes[42] }, }; EXPORT_SYMBOL(dmt_modes); #endif /* CONFIG_FB_MODE_HELPERS */ /** * fb_try_mode - test a video mode * @var: frame buffer user defined part of display * @info: frame buffer info structure * @mode: frame buffer video mode structure * @bpp: color depth in bits per pixel * * Tries a video mode to test its validity for device @info. * * Returns 0 on success, a negative errno from fb_check_var() otherwise. * */ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info, const struct fb_videomode *mode, unsigned int bpp) { int err = 0; DPRINTK("Trying mode %s %dx%d-%d@%d\n", mode->name ? mode->name : "noname", mode->xres, mode->yres, bpp, mode->refresh); var->xres = mode->xres; var->yres = mode->yres; var->xres_virtual = mode->xres; var->yres_virtual = mode->yres; var->xoffset = 0; var->yoffset = 0; var->bits_per_pixel = bpp; var->activate |= FB_ACTIVATE_TEST; var->pixclock = mode->pixclock; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->hsync_len = mode->hsync_len; var->vsync_len = mode->vsync_len; var->sync = mode->sync; var->vmode = mode->vmode; if (info->fbops->fb_check_var) err = info->fbops->fb_check_var(var, info); var->activate &= ~FB_ACTIVATE_TEST; return err; } /** * fb_find_mode - finds a valid video mode * @var: frame buffer user defined part of display * @info: frame buffer info structure * @mode_option: string video mode to find * @db: video mode database * @dbsize: size of @db * @default_mode: default video mode to fall back to * @default_bpp: default color depth in bits per pixel * * Finds a suitable video mode, starting with the specified mode * in @mode_option with fallback to @default_mode. If * @default_mode fails, all modes in the video mode database will * be tried. * * Valid mode specifiers for @mode_option:: * * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][p][m] * * or :: * * <name>[-<bpp>][@<refresh>] * * with <xres>, <yres>, <bpp> and <refresh> decimal numbers and * <name> a string. * * If 'M' is present after yres (and before refresh/bpp if present), * the function will compute the timings using VESA(tm) Coordinated * Video Timings (CVT). If 'R' is present after 'M', will compute with * reduced blanking (for flatpanels). If 'i' or 'p' are present, compute * interlaced or progressive mode. If 'm' is present, add margins equal * to 1.8% of xres rounded down to 8 pixels, and 1.8% of yres. The * characters 'i', 'p' and 'm' must come after 'M' and 'R'. Example:: * * 1024x768MR-8@60m - Reduced blank with margins at 60Hz. * * NOTE: The passed struct @var is _not_ cleared! This allows you * to supply values for e.g.
the grayscale and accel_flags fields. * * Returns zero for failure, 1 if using specified @mode_option, * 2 if using specified @mode_option with an ignored refresh rate, * 3 if default mode is used, 4 if fall back to any valid mode, * 5 if a best-fit mode with a differing resolution is used. */ int fb_find_mode(struct fb_var_screeninfo *var, struct fb_info *info, const char *mode_option, const struct fb_videomode *db, unsigned int dbsize, const struct fb_videomode *default_mode, unsigned int default_bpp) { char *mode_option_buf = NULL; int i; /* Set up defaults */ if (!db) { db = modedb; dbsize = ARRAY_SIZE(modedb); } if (!default_mode) default_mode = &db[0]; if (!default_bpp) default_bpp = 8; /* Did the user specify a video mode? */ if (!mode_option) { fb_get_options(NULL, &mode_option_buf); mode_option = mode_option_buf; } if (mode_option) { const char *name = mode_option; unsigned int namelen = strlen(name); int res_specified = 0, bpp_specified = 0, refresh_specified = 0; unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0; int yres_specified = 0, cvt = 0, rb = 0; int interlace_specified = 0, interlace = 0; int margins = 0; u32 best, diff, tdiff; for (i = namelen-1; i >= 0; i--) { switch (name[i]) { case '@': namelen = i; if (!refresh_specified && !bpp_specified && !yres_specified) { refresh = simple_strtol(&name[i+1], NULL, 10); refresh_specified = 1; if (cvt || rb) cvt = 0; } else goto done; break; case '-': namelen = i; if (!bpp_specified && !yres_specified) { bpp = simple_strtol(&name[i+1], NULL, 10); bpp_specified = 1; if (cvt || rb) cvt = 0; } else goto done; break; case 'x': if (!yres_specified) { yres = simple_strtol(&name[i+1], NULL, 10); yres_specified = 1; } else goto done; break; case '0' ... '9': break; case 'M': if (!yres_specified) cvt = 1; break; case 'R': if (!cvt) rb = 1; break; case 'm': if (!cvt) margins = 1; break; case 'p': if (!cvt) { interlace = 0; interlace_specified = 1; } break; case 'i': if (!cvt) { interlace = 1; interlace_specified = 1; } break; default: goto done; } } if (i < 0 && yres_specified) { xres = simple_strtol(name, NULL, 10); res_specified = 1; } done: kfree(mode_option_buf); if (cvt) { struct fb_videomode cvt_mode; int ret; DPRINTK("CVT mode %dx%d@%dHz%s%s%s\n", xres, yres, (refresh) ? refresh : 60, (rb) ? " reduced blanking" : "", (margins) ? " with margins" : "", (interlace) ? " interlaced" : ""); memset(&cvt_mode, 0, sizeof(cvt_mode)); cvt_mode.xres = xres; cvt_mode.yres = yres; cvt_mode.refresh = (refresh) ? refresh : 60; if (interlace) cvt_mode.vmode |= FB_VMODE_INTERLACED; else cvt_mode.vmode &= ~FB_VMODE_INTERLACED; ret = fb_find_mode_cvt(&cvt_mode, margins, rb); if (!ret && !fb_try_mode(var, info, &cvt_mode, bpp)) { DPRINTK("modedb CVT: CVT mode ok\n"); return 1; } DPRINTK("CVT mode invalid, getting mode from database\n"); } DPRINTK("Trying specified video mode%s %ix%i\n", refresh_specified ? "" : " (ignoring refresh rate)", xres, yres); if (!refresh_specified) { /* * If the caller has provided a custom mode database and * a valid monspecs structure, we look for the mode with * the highest refresh rate. Otherwise we play it safe * and try to find a mode with a refresh rate closest * to the standard 60 Hz.
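 * For example, a plain "800x600" request against the built-in modedb is scored against a 60 Hz target, so the 800x600@60 entry is chosen over the 56, 72, 85 and 100 Hz variants.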
*/ if (db != modedb && info->monspecs.vfmin && info->monspecs.vfmax && info->monspecs.hfmin && info->monspecs.hfmax && info->monspecs.dclkmax) { refresh = 1000; } else { refresh = 60; } } diff = -1; best = -1; for (i = 0; i < dbsize; i++) { if ((name_matches(db[i], name, namelen) || (res_specified && res_matches(db[i], xres, yres))) && !fb_try_mode(var, info, &db[i], bpp)) { const int db_interlace = (db[i].vmode & FB_VMODE_INTERLACED ? 1 : 0); int score = abs(db[i].refresh - refresh); if (interlace_specified) score += abs(db_interlace - interlace); if (!interlace_specified || db_interlace == interlace) if (refresh_specified && db[i].refresh == refresh) return 1; if (score < diff) { diff = score; best = i; } } } if (best != -1) { fb_try_mode(var, info, &db[best], bpp); return (refresh_specified) ? 2 : 1; } diff = 2 * (xres + yres); best = -1; DPRINTK("Trying best-fit modes\n"); for (i = 0; i < dbsize; i++) { DPRINTK("Trying %ix%i\n", db[i].xres, db[i].yres); if (!fb_try_mode(var, info, &db[i], bpp)) { tdiff = abs(db[i].xres - xres) + abs(db[i].yres - yres); /* * Penalize modes with resolutions smaller * than requested. */ if (xres > db[i].xres || yres > db[i].yres) tdiff += xres + yres; if (diff > tdiff) { diff = tdiff; best = i; } } } if (best != -1) { fb_try_mode(var, info, &db[best], bpp); return 5; } } DPRINTK("Trying default video mode\n"); if (!fb_try_mode(var, info, default_mode, default_bpp)) return 3; DPRINTK("Trying all modes\n"); for (i = 0; i < dbsize; i++) if (!fb_try_mode(var, info, &db[i], default_bpp)) return 4; DPRINTK("No valid mode found\n"); return 0; } /** * fb_var_to_videomode - convert fb_var_screeninfo to fb_videomode * @mode: pointer to struct fb_videomode * @var: pointer to struct fb_var_screeninfo */ void fb_var_to_videomode(struct fb_videomode *mode, const struct fb_var_screeninfo *var) { u32 pixclock, hfreq, htotal, vtotal; mode->name = NULL; mode->xres = var->xres; mode->yres = var->yres; mode->pixclock = var->pixclock; mode->hsync_len = var->hsync_len; mode->vsync_len = var->vsync_len; mode->left_margin = var->left_margin; mode->right_margin = var->right_margin; mode->upper_margin = var->upper_margin; mode->lower_margin = var->lower_margin; mode->sync = var->sync; mode->vmode = var->vmode & FB_VMODE_MASK; mode->flag = FB_MODE_IS_FROM_VAR; mode->refresh = 0; if (!var->pixclock) return; pixclock = PICOS2KHZ(var->pixclock) * 1000; htotal = var->xres + var->right_margin + var->hsync_len + var->left_margin; vtotal = var->yres + var->lower_margin + var->vsync_len + var->upper_margin; if (var->vmode & FB_VMODE_INTERLACED) vtotal /= 2; if (var->vmode & FB_VMODE_DOUBLE) vtotal *= 2; if (!htotal || !vtotal) return; hfreq = pixclock/htotal; mode->refresh = hfreq/vtotal; } /** * fb_videomode_to_var - convert fb_videomode to fb_var_screeninfo * @var: pointer to struct fb_var_screeninfo * @mode: pointer to struct fb_videomode */ void fb_videomode_to_var(struct fb_var_screeninfo *var, const struct fb_videomode *mode) { var->xres = mode->xres; var->yres = mode->yres; var->xres_virtual = mode->xres; var->yres_virtual = mode->yres; var->xoffset = 0; var->yoffset = 0; var->pixclock = mode->pixclock; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->hsync_len = mode->hsync_len; var->vsync_len = mode->vsync_len; var->sync = mode->sync; var->vmode = mode->vmode & FB_VMODE_MASK; } /** * fb_mode_is_equal - compare 2 videomodes * @mode1: first videomode * @mode2: second 
videomode * * RETURNS: * 1 if equal, 0 if not */ int fb_mode_is_equal(const struct fb_videomode *mode1, const struct fb_videomode *mode2) { return (mode1->xres == mode2->xres && mode1->yres == mode2->yres && mode1->pixclock == mode2->pixclock && mode1->hsync_len == mode2->hsync_len && mode1->vsync_len == mode2->vsync_len && mode1->left_margin == mode2->left_margin && mode1->right_margin == mode2->right_margin && mode1->upper_margin == mode2->upper_margin && mode1->lower_margin == mode2->lower_margin && mode1->sync == mode2->sync && mode1->vmode == mode2->vmode); } /** * fb_find_best_mode - find best matching videomode * @var: pointer to struct fb_var_screeninfo * @head: pointer to struct list_head of modelist * * RETURNS: * struct fb_videomode, NULL if none found * * IMPORTANT: * This function assumes that all modelist entries in * info->modelist are valid. * * NOTES: * Finds best matching videomode which has an equal or greater dimension than * var->xres and var->yres. If more than 1 videomode is found, will return * the videomode with the highest refresh rate */ const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var, struct list_head *head) { struct fb_modelist *modelist; struct fb_videomode *mode, *best = NULL; u32 diff = -1; list_for_each_entry(modelist, head, list) { u32 d; mode = &modelist->mode; if (mode->xres >= var->xres && mode->yres >= var->yres) { d = (mode->xres - var->xres) + (mode->yres - var->yres); if (diff > d) { diff = d; best = mode; } else if (diff == d && best && mode->refresh > best->refresh) best = mode; } } return best; } /** * fb_find_nearest_mode - find closest videomode * * @mode: pointer to struct fb_videomode * @head: pointer to modelist * * Finds best matching videomode, smaller or greater in dimension. * If more than 1 videomode is found, will return the videomode with * the closest refresh rate. 
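 *
 * A minimal usage sketch (hypothetical driver code; @info stands for the
 * driver's struct fb_info)::
 *
 *	struct fb_videomode wanted;
 *	const struct fb_videomode *m;
 *
 *	fb_var_to_videomode(&wanted, &info->var);
 *	m = fb_find_nearest_mode(&wanted, &info->modelist);
 *	if (m)
 *		fb_videomode_to_var(&info->var, m);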
*/ const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode, struct list_head *head) { struct fb_modelist *modelist; struct fb_videomode *cmode, *best = NULL; u32 diff = -1, diff_refresh = -1; list_for_each_entry(modelist, head, list) { u32 d; cmode = &modelist->mode; d = abs(cmode->xres - mode->xres) + abs(cmode->yres - mode->yres); if (diff > d) { diff = d; diff_refresh = abs(cmode->refresh - mode->refresh); best = cmode; } else if (diff == d) { d = abs(cmode->refresh - mode->refresh); if (diff_refresh > d) { diff_refresh = d; best = cmode; } } } return best; } /** * fb_match_mode - find a videomode which exactly matches the timings in var * @var: pointer to struct fb_var_screeninfo * @head: pointer to struct list_head of modelist * * RETURNS: * struct fb_videomode, NULL if none found */ const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var, struct list_head *head) { struct fb_modelist *modelist; struct fb_videomode *m, mode; fb_var_to_videomode(&mode, var); list_for_each_entry(modelist, head, list) { m = &modelist->mode; if (fb_mode_is_equal(m, &mode)) return m; } return NULL; } /** * fb_add_videomode - adds videomode entry to modelist * @mode: videomode to add * @head: struct list_head of modelist * * NOTES: * Will only add unmatched mode entries */ int fb_add_videomode(const struct fb_videomode *mode, struct list_head *head) { struct fb_modelist *modelist; struct fb_videomode *m; int found = 0; list_for_each_entry(modelist, head, list) { m = &modelist->mode; if (fb_mode_is_equal(m, mode)) { found = 1; break; } } if (!found) { modelist = kmalloc(sizeof(struct fb_modelist), GFP_KERNEL); if (!modelist) return -ENOMEM; modelist->mode = *mode; list_add(&modelist->list, head); } return 0; } /** * fb_delete_videomode - removes videomode entry from modelist * @mode: videomode to remove * @head: struct list_head of modelist * * NOTES: * Will remove all matching mode entries */ void fb_delete_videomode(const struct fb_videomode *mode, struct list_head *head) { struct list_head *pos, *n; struct fb_modelist *modelist; struct fb_videomode *m; list_for_each_safe(pos, n, head) { modelist = list_entry(pos, struct fb_modelist, list); m = &modelist->mode; if (fb_mode_is_equal(m, mode)) { list_del(pos); kfree(pos); } } } /** * fb_destroy_modelist - destroy modelist * @head: struct list_head of modelist */ void fb_destroy_modelist(struct list_head *head) { struct list_head *pos, *n; list_for_each_safe(pos, n, head) { list_del(pos); kfree(pos); } } EXPORT_SYMBOL_GPL(fb_destroy_modelist); /** * fb_videomode_to_modelist - convert mode array to mode list * @modedb: array of struct fb_videomode * @num: number of entries in array * @head: struct list_head of modelist */ void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num, struct list_head *head) { int i; INIT_LIST_HEAD(head); for (i = 0; i < num; i++) { if (fb_add_videomode(&modedb[i], head)) return; } } const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs, struct list_head *head) { struct fb_modelist *modelist; const struct fb_videomode *m, *m1 = NULL, *md = NULL, *best = NULL; int first = 0; if (!head->prev || !head->next || list_empty(head)) goto finished; /* get the first detailed mode and the very first mode */ list_for_each_entry(modelist, head, list) { m = &modelist->mode; if (!first) { m1 = m; first = 1; } if (m->flag & FB_MODE_IS_FIRST) { md = m; break; } } /* first detailed timing is preferred */ if (specs->misc & FB_MISC_1ST_DETAIL) { best = md; goto
finished; } /* find best mode based on display width and height */ if (specs->max_x && specs->max_y) { struct fb_var_screeninfo var; memset(&var, 0, sizeof(struct fb_var_screeninfo)); var.xres = (specs->max_x * 7200)/254; var.yres = (specs->max_y * 7200)/254; m = fb_find_best_mode(&var, head); if (m) { best = m; goto finished; } } /* use first detailed mode */ if (md) { best = md; goto finished; } /* last resort, use the very first mode */ best = m1; finished: return best; } EXPORT_SYMBOL(fb_find_best_display); EXPORT_SYMBOL(fb_videomode_to_var); EXPORT_SYMBOL(fb_var_to_videomode); EXPORT_SYMBOL(fb_mode_is_equal); EXPORT_SYMBOL(fb_add_videomode); EXPORT_SYMBOL(fb_match_mode); EXPORT_SYMBOL(fb_find_best_mode); EXPORT_SYMBOL(fb_find_nearest_mode); EXPORT_SYMBOL(fb_videomode_to_modelist); EXPORT_SYMBOL(fb_find_mode); EXPORT_SYMBOL(fb_find_mode_cvt);
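/*
 * A minimal, hypothetical sketch of how a fbdev driver would typically use
 * the helpers above at probe time: fb_find_mode() parses a user-supplied
 * mode string such as "1024x768MR-8@60m" and falls back through the default
 * mode and the whole database. The function name and @option parameter are
 * illustrative only, not part of the API.
 */
static int example_setup_mode(struct fb_info *info, const char *option)
{
	/* NULL @db and @default_mode select the built-in modedb[] table */
	if (!fb_find_mode(&info->var, info, option, NULL, 0, NULL, 8))
		return -EINVAL;	/* return value 0: no valid mode found */
	return 0;
}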
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar * Copyright (C) 2005-2006 Thomas Gleixner * * This file contains driver APIs to the irq subsystem.
*/ #define pr_fmt(fmt) "genirq: " fmt #include <linux/irq.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/random.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/sched/task.h> #include <linux/sched/isolation.h> #include <uapi/linux/sched/types.h> #include <linux/task_work.h> #include "internals.h" #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT) DEFINE_STATIC_KEY_FALSE(force_irqthreads_key); static int __init setup_forced_irqthreads(char *arg) { static_branch_enable(&force_irqthreads_key); return 0; } early_param("threadirqs", setup_forced_irqthreads); #endif static int __irq_get_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool *state); static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) { struct irq_data *irqd = irq_desc_get_irq_data(desc); bool inprogress; do { /* * Wait until we're out of the critical section. This might * give the wrong answer due to the lack of memory barriers. */ while (irqd_irq_inprogress(&desc->irq_data)) cpu_relax(); /* Ok, that indicated we're done: double-check carefully. */ guard(raw_spinlock_irqsave)(&desc->lock); inprogress = irqd_irq_inprogress(&desc->irq_data); /* * If requested and supported, check at the chip whether it * is in flight at the hardware level, i.e. already pending * in a CPU and waiting for service and acknowledge. */ if (!inprogress && sync_chip) { /* * Ignore the return code. inprogress is only updated * when the chip supports it. */ __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE, &inprogress); } /* Oops, that failed? */ } while (inprogress); } /** * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs) * @irq: interrupt number to wait for * * This function waits for any pending hard IRQ handlers for this interrupt * to complete before returning. If you use this function while holding a * resource the IRQ handler may need you will deadlock. It does not take * associated threaded handlers into account. * * Do not use this for shutdown scenarios where you must be sure that all * parts (hardirq and threaded handler) have completed. * * Returns: false if a threaded handler is active. * * This function may be called - with care - from IRQ context. * * It does not check whether there is an interrupt in flight at the * hardware level, but not serviced yet, as this might deadlock when called * with interrupts disabled and the target CPU of the interrupt is the * current CPU. */ bool synchronize_hardirq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { __synchronize_hardirq(desc, false); return !atomic_read(&desc->threads_active); } return true; } EXPORT_SYMBOL(synchronize_hardirq); static void __synchronize_irq(struct irq_desc *desc) { __synchronize_hardirq(desc, true); /* * We made sure that no hardirq handler is running. Now verify that no * threaded handlers are active. */ wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); } /** * synchronize_irq - wait for pending IRQ handlers (on other CPUs) * @irq: interrupt number to wait for * * This function waits for any pending IRQ handlers for this interrupt to * complete before returning. If you use this function while holding a * resource the IRQ handler may need you will deadlock. * * Can only be called from preemptible code as it might sleep when * an interrupt thread is associated to @irq. 
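 *
 * A typical teardown sketch (dev->irq is a hypothetical driver field)::
 *
 *	disable_irq_nosync(dev->irq);
 *	synchronize_irq(dev->irq);
 *
 * after which neither the hardirq handler nor an associated threaded
 * handler can still be running.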
* * It optionally makes sure (when the irq chip supports that method) * that the interrupt is not pending in any CPU and waiting for * service. */ void synchronize_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (desc) __synchronize_irq(desc); } EXPORT_SYMBOL(synchronize_irq); #ifdef CONFIG_SMP cpumask_var_t irq_default_affinity; static bool __irq_can_set_affinity(struct irq_desc *desc) { if (!desc || !irqd_can_balance(&desc->irq_data) || !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) return false; return true; } /** * irq_can_set_affinity - Check if the affinity of a given irq can be set * @irq: Interrupt to check * */ int irq_can_set_affinity(unsigned int irq) { return __irq_can_set_affinity(irq_to_desc(irq)); } /** * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space * @irq: Interrupt to check * * Like irq_can_set_affinity() above, but additionally checks for the * AFFINITY_MANAGED flag. */ bool irq_can_set_affinity_usr(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); return __irq_can_set_affinity(desc) && !irqd_affinity_is_managed(&desc->irq_data); } /** * irq_set_thread_affinity - Notify irq threads to adjust affinity * @desc: irq descriptor which has affinity changed * * Just set IRQTF_AFFINITY and delegate the affinity setting to the * interrupt thread itself. We cannot call set_cpus_allowed_ptr() here as * we hold desc->lock and this code can be called from hard interrupt * context. */ static void irq_set_thread_affinity(struct irq_desc *desc) { struct irqaction *action; for_each_action_of_desc(desc, action) { if (action->thread) { set_bit(IRQTF_AFFINITY, &action->thread_flags); wake_up_process(action->thread); } if (action->secondary && action->secondary->thread) { set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags); wake_up_process(action->secondary->thread); } } } #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK static void irq_validate_effective_affinity(struct irq_data *data) { const struct cpumask *m = irq_data_get_effective_affinity_mask(data); struct irq_chip *chip = irq_data_get_irq_chip(data); if (!cpumask_empty(m)) return; pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", chip->name, data->irq); } #else static inline void irq_validate_effective_affinity(struct irq_data *data) { } #endif static DEFINE_PER_CPU(struct cpumask, __tmp_mask); int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { struct cpumask *tmp_mask = this_cpu_ptr(&__tmp_mask); struct irq_desc *desc = irq_data_to_desc(data); struct irq_chip *chip = irq_data_get_irq_chip(data); const struct cpumask *prog_mask; int ret; if (!chip || !chip->irq_set_affinity) return -EINVAL; /* * If this is a managed interrupt and housekeeping is enabled on * it, check whether the requested affinity mask intersects with * a housekeeping CPU. If so, then remove the isolated CPUs from * the mask and just keep the housekeeping CPU(s). This prevents * the affinity setter from routing the interrupt to an isolated * CPU, so that I/O submitted from a housekeeping CPU does not cause * interrupts on an isolated one. * * If the masks do not intersect or include online CPU(s) then * keep the requested mask. The isolated target CPUs only * receive interrupts when the I/O operation was submitted * directly from them.
* * If all housekeeping CPUs in the affinity mask are offline, the * interrupt will be migrated by the CPU hotplug code once a * housekeeping CPU which belongs to the affinity mask comes * online. */ if (irqd_affinity_is_managed(data) && housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) { const struct cpumask *hk_mask; hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); cpumask_and(tmp_mask, mask, hk_mask); if (!cpumask_intersects(tmp_mask, cpu_online_mask)) prog_mask = mask; else prog_mask = tmp_mask; } else { prog_mask = mask; } /* * Make sure we only provide online CPUs to the irqchip, * unless we are being asked to force the affinity (in which * case we do as we are told). */ cpumask_and(tmp_mask, prog_mask, cpu_online_mask); if (!force && !cpumask_empty(tmp_mask)) ret = chip->irq_set_affinity(data, tmp_mask, force); else if (force) ret = chip->irq_set_affinity(data, mask, force); else ret = -EINVAL; switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: cpumask_copy(desc->irq_common_data.affinity, mask); fallthrough; case IRQ_SET_MASK_OK_NOCOPY: irq_validate_effective_affinity(data); irq_set_thread_affinity(desc); ret = 0; } return ret; } #ifdef CONFIG_GENERIC_PENDING_IRQ static inline int irq_set_affinity_pending(struct irq_data *data, const struct cpumask *dest) { struct irq_desc *desc = irq_data_to_desc(data); irqd_set_move_pending(data); irq_copy_pending(desc, dest); return 0; } #else static inline int irq_set_affinity_pending(struct irq_data *data, const struct cpumask *dest) { return -EBUSY; } #endif static int irq_try_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) { int ret = irq_do_set_affinity(data, dest, force); /* * In case that the underlying vector management is busy and the * architecture supports the generic pending mechanism then utilize * this to avoid returning an error to user space. */ if (ret == -EBUSY && !force) ret = irq_set_affinity_pending(data, dest); return ret; } static bool irq_set_affinity_deactivated(struct irq_data *data, const struct cpumask *mask) { struct irq_desc *desc = irq_data_to_desc(data); /* * Handle irq chips which can handle affinity only in activated * state correctly * * If the interrupt is not yet activated, just store the affinity * mask and do not call the chip driver at all. On activation the * driver has to make sure anyway that the interrupt is in a * usable state so startup works. 
*/ if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data) || !irqd_affinity_on_activate(data)) return false; cpumask_copy(desc->irq_common_data.affinity, mask); irq_data_update_effective_affinity(data, mask); irqd_set(data, IRQD_AFFINITY_SET); return true; } int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_chip *chip = irq_data_get_irq_chip(data); struct irq_desc *desc = irq_data_to_desc(data); int ret = 0; if (!chip || !chip->irq_set_affinity) return -EINVAL; if (irq_set_affinity_deactivated(data, mask)) return 0; if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) { ret = irq_try_set_affinity(data, mask, force); } else { irqd_set_move_pending(data); irq_copy_pending(desc, mask); } if (desc->affinity_notify) { kref_get(&desc->affinity_notify->kref); if (!schedule_work(&desc->affinity_notify->work)) { /* Work was already scheduled, drop our extra ref */ kref_put(&desc->affinity_notify->kref, desc->affinity_notify->release); } } irqd_set(data, IRQD_AFFINITY_SET); return ret; } /** * irq_update_affinity_desc - Update affinity management for an interrupt * @irq: The interrupt number to update * @affinity: Pointer to the affinity descriptor * * This interface can be used to configure the affinity management of * interrupts which have been allocated already. * * There are certain limitations on when it may be used - attempts to use it * for when the kernel is configured for generic IRQ reservation mode (in * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with * managed/non-managed interrupt accounting. In addition, attempts to use it on * an interrupt which is already started or which has already been configured * as managed will also fail, as these mean invalid init state or double init. */ int irq_update_affinity_desc(unsigned int irq, struct irq_affinity_desc *affinity) { /* * Supporting this with the reservation scheme used by x86 needs * some more thought. Fail it for now. */ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) return -EOPNOTSUPP; scoped_irqdesc_get_and_buslock(irq, 0) { struct irq_desc *desc = scoped_irqdesc; bool activated; /* Requires the interrupt to be shut down */ if (irqd_is_started(&desc->irq_data)) return -EBUSY; /* Interrupts which are already managed cannot be modified */ if (irqd_affinity_is_managed(&desc->irq_data)) return -EBUSY; /* * Deactivate the interrupt. That's required to undo * anything an earlier activation has established. 
*/ activated = irqd_is_activated(&desc->irq_data); if (activated) irq_domain_deactivate_irq(&desc->irq_data); if (affinity->is_managed) { irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); } cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); /* Restore the activation state */ if (activated) irq_domain_activate_irq(&desc->irq_data, false); return 0; } return -EINVAL; } static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) { struct irq_desc *desc = irq_to_desc(irq); if (!desc) return -EINVAL; guard(raw_spinlock_irqsave)(&desc->lock); return irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); } /** * irq_set_affinity - Set the irq affinity of a given irq * @irq: Interrupt to set affinity * @cpumask: cpumask * * Fails if cpumask does not contain an online CPU */ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) { return __irq_set_affinity(irq, cpumask, false); } EXPORT_SYMBOL_GPL(irq_set_affinity); /** * irq_force_affinity - Force the irq affinity of a given irq * @irq: Interrupt to set affinity * @cpumask: cpumask * * Same as irq_set_affinity, but without checking the mask against * online cpus. * * Solely for low level cpu hotplug code, where we need to make per * cpu interrupts affine before the cpu becomes online. */ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) { return __irq_set_affinity(irq, cpumask, true); } EXPORT_SYMBOL_GPL(irq_force_affinity); int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m, bool setaffinity) { int ret = -EINVAL; scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { scoped_irqdesc->affinity_hint = m; ret = 0; } if (!ret && m && setaffinity) __irq_set_affinity(irq, m, false); return ret; } EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint); static void irq_affinity_notify(struct work_struct *work) { struct irq_affinity_notify *notify = container_of(work, struct irq_affinity_notify, work); struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) goto out; scoped_guard(raw_spinlock_irqsave, &desc->lock) { if (irq_move_pending(&desc->irq_data)) irq_get_pending(cpumask, desc); else cpumask_copy(cpumask, desc->irq_common_data.affinity); } notify->notify(notify, cpumask); free_cpumask_var(cpumask); out: kref_put(&notify->kref, notify->release); } /** * irq_set_affinity_notifier - control notification of IRQ affinity changes * @irq: Interrupt for which to enable/disable notification * @notify: Context for notification, or %NULL to disable * notification. Function pointers must be initialised; * the other fields will be initialised by this function. * * Must be called in process context. Notification may only be enabled * after the IRQ is allocated and must be disabled before the IRQ is freed * using free_irq(). 
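 *
 * A registration sketch (all foo_* names are hypothetical)::
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{ ... }
 *	static void foo_release(struct kref *kref)
 *	{ ... }
 *
 *	foo->notify.notify = foo_notify;
 *	foo->notify.release = foo_release;
 *	irq_set_affinity_notifier(irq, &foo->notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// disable before free_irq()
 *	free_irq(irq, foo);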
*/ int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) { struct irq_desc *desc = irq_to_desc(irq); struct irq_affinity_notify *old_notify; /* The release function is promised process context */ might_sleep(); if (!desc || irq_is_nmi(desc)) return -EINVAL; /* Complete initialisation of *notify */ if (notify) { notify->irq = irq; kref_init(&notify->kref); INIT_WORK(&notify->work, irq_affinity_notify); } scoped_guard(raw_spinlock_irq, &desc->lock) { old_notify = desc->affinity_notify; desc->affinity_notify = notify; } if (old_notify) { if (cancel_work_sync(&old_notify->work)) { /* Pending work had a ref, put that one too */ kref_put(&old_notify->kref, old_notify->release); } kref_put(&old_notify->kref, old_notify->release); } return 0; } EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); #ifndef CONFIG_AUTO_IRQ_AFFINITY /* * Generic version of the affinity autoselector. */ int irq_setup_affinity(struct irq_desc *desc) { struct cpumask *set = irq_default_affinity; int node = irq_desc_get_node(desc); static DEFINE_RAW_SPINLOCK(mask_lock); static struct cpumask mask; /* Excludes PER_CPU and NO_BALANCE interrupts */ if (!__irq_can_set_affinity(desc)) return 0; guard(raw_spinlock)(&mask_lock); /* * Preserve the managed affinity setting and a userspace affinity * setup, but make sure that one of the targets is online. */ if (irqd_affinity_is_managed(&desc->irq_data) || irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { if (cpumask_intersects(desc->irq_common_data.affinity, cpu_online_mask)) set = desc->irq_common_data.affinity; else irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); } cpumask_and(&mask, cpu_online_mask, set); if (cpumask_empty(&mask)) cpumask_copy(&mask, cpu_online_mask); if (node != NUMA_NO_NODE) { const struct cpumask *nodemask = cpumask_of_node(node); /* make sure at least one of the cpus in nodemask is online */ if (cpumask_intersects(&mask, nodemask)) cpumask_and(&mask, &mask, nodemask); } return irq_do_set_affinity(&desc->irq_data, &mask, false); } #else /* Wrapper for ALPHA specific affinity selector magic */ int irq_setup_affinity(struct irq_desc *desc) { return irq_select_affinity(irq_desc_get_irq(desc)); } #endif /* CONFIG_AUTO_IRQ_AFFINITY */ #endif /* CONFIG_SMP */ /** * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt * @irq: interrupt number to set affinity * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU * specific data for percpu_devid interrupts * * This function uses the vCPU specific data to set the vCPU affinity for * an irq. The vCPU specific data is passed from outside, such as KVM. One * example code path is as below: KVM -> IOMMU -> irq_set_vcpu_affinity(). 
*/ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info) { scoped_irqdesc_get_and_lock(irq, 0) { struct irq_desc *desc = scoped_irqdesc; struct irq_data *data; struct irq_chip *chip; data = irq_desc_get_irq_data(desc); do { chip = irq_data_get_irq_chip(data); if (chip && chip->irq_set_vcpu_affinity) break; data = irqd_get_parent_data(data); } while (data); if (!data) return -ENOSYS; return chip->irq_set_vcpu_affinity(data, vcpu_info); } return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity); void __disable_irq(struct irq_desc *desc) { if (!desc->depth++) irq_disable(desc); } static int __disable_irq_nosync(unsigned int irq) { scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { __disable_irq(scoped_irqdesc); return 0; } return -EINVAL; } /** * disable_irq_nosync - disable an irq without waiting * @irq: Interrupt to disable * * Disable the selected interrupt line. Disables and Enables are * nested. * Unlike disable_irq(), this function does not ensure existing * instances of the IRQ handler have completed before returning. * * This function may be called from IRQ context. */ void disable_irq_nosync(unsigned int irq) { __disable_irq_nosync(irq); } EXPORT_SYMBOL(disable_irq_nosync); /** * disable_irq - disable an irq and wait for completion * @irq: Interrupt to disable * * Disable the selected interrupt line. Enables and Disables are nested. * * This function waits for any pending IRQ handlers for this interrupt to * complete before returning. If you use this function while holding a * resource the IRQ handler may need you will deadlock. * * Can only be called from preemptible code as it might sleep when an * interrupt thread is associated to @irq. * */ void disable_irq(unsigned int irq) { might_sleep(); if (!__disable_irq_nosync(irq)) synchronize_irq(irq); } EXPORT_SYMBOL(disable_irq); /** * disable_hardirq - disables an irq and waits for hardirq completion * @irq: Interrupt to disable * * Disable the selected interrupt line. Enables and Disables are nested. * * This function waits for any pending hard IRQ handlers for this interrupt * to complete before returning. If you use this function while holding a * resource the hard IRQ handler may need you will deadlock. * * When used to optimistically disable an interrupt from atomic context the * return value must be checked. * * Returns: false if a threaded handler is active. * * This function may be called - with care - from IRQ context. */ bool disable_hardirq(unsigned int irq) { if (!__disable_irq_nosync(irq)) return synchronize_hardirq(irq); return false; } EXPORT_SYMBOL_GPL(disable_hardirq); /** * disable_nmi_nosync - disable an nmi without waiting * @irq: Interrupt to disable * * Disable the selected interrupt line. Disables and enables are nested. * * The interrupt to disable must have been requested through request_nmi. * Unlike disable_nmi(), this function does not ensure existing * instances of the IRQ handler have completed before returning. */ void disable_nmi_nosync(unsigned int irq) { disable_irq_nosync(irq); } void __enable_irq(struct irq_desc *desc) { switch (desc->depth) { case 0: err_out: WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq_desc_get_irq(desc)); break; case 1: { if (desc->istate & IRQS_SUSPENDED) goto err_out; /* Prevent probing on this irq: */ irq_settings_set_noprobe(desc); /* * Call irq_startup() not irq_enable() here because the * interrupt might be marked NOAUTOEN so irq_startup() * needs to be invoked when it gets enabled the first time. 
* This is also required when __enable_irq() is invoked for * a managed and shutdown interrupt from the S3 resume * path. * * If it was already started up, then irq_startup() will * invoke irq_enable() under the hood. */ irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); break; } default: desc->depth--; } } /** * enable_irq - enable handling of an irq * @irq: Interrupt to enable * * Undoes the effect of one call to disable_irq(). If this matches the * last disable, processing of interrupts on this IRQ line is re-enabled. * * This function may be called from IRQ context only when * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! */ void enable_irq(unsigned int irq) { scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { struct irq_desc *desc = scoped_irqdesc; if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq)) return; __enable_irq(desc); } } EXPORT_SYMBOL(enable_irq); /** * enable_nmi - enable handling of an nmi * @irq: Interrupt to enable * * The interrupt to enable must have been requested through request_nmi. * Undoes the effect of one call to disable_nmi(). If this matches the last * disable, processing of interrupts on this IRQ line is re-enabled. */ void enable_nmi(unsigned int irq) { enable_irq(irq); } static int set_irq_wake_real(unsigned int irq, unsigned int on) { struct irq_desc *desc = irq_to_desc(irq); int ret = -ENXIO; if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) return 0; if (desc->irq_data.chip->irq_set_wake) ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); return ret; } /** * irq_set_irq_wake - control irq power management wakeup * @irq: interrupt to control * @on: enable/disable power management wakeup * * Enable/disable power management wakeup mode, which is disabled by * default. Enables and disables must match, just as they match for * non-wakeup mode support. * * Wakeup mode lets this IRQ wake the system from sleep states like * "suspend to RAM". * * Note: irq enable/disable state is completely orthogonal to the * enable/disable state of irq wake. An irq can be disabled with * disable_irq() and still wake the system as long as the irq has wake * enabled. If this does not hold, then the underlying irq chip and the * related driver need to be investigated. */ int irq_set_irq_wake(unsigned int irq, unsigned int on) { scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { struct irq_desc *desc = scoped_irqdesc; int ret = 0; /* Don't use NMIs as wake up interrupts please */ if (irq_is_nmi(desc)) return -EINVAL; /* * wakeup-capable irqs can be shared between drivers that * don't need to have the same sleep mode behaviors. */ if (on) { if (desc->wake_depth++ == 0) { ret = set_irq_wake_real(irq, on); if (ret) desc->wake_depth = 0; else irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); } } else { if (desc->wake_depth == 0) { WARN(1, "Unbalanced IRQ %d wake disable\n", irq); } else if (--desc->wake_depth == 0) { ret = set_irq_wake_real(irq, on); if (ret) desc->wake_depth = 1; else irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); } } return ret; } return -EINVAL; } EXPORT_SYMBOL(irq_set_irq_wake); /* * Internal function that tells the architecture code whether a * particular irq has been exclusively allocated or is available * for driver use. 
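 */

/*
 * Hedged sketch for irq_set_irq_wake() above (not for the internal
 * helper that follows): drivers normally use the enable_irq_wake()/
 * disable_irq_wake() wrappers from their suspend/resume callbacks.
 * struct example_dev and the callbacks are hypothetical.
 */
struct example_dev {
	unsigned int irq;
};

static int example_suspend(struct device *dev)
{
	struct example_dev *ed = dev_get_drvdata(dev);

	/* Keep the line armed as a system wakeup source */
	if (device_may_wakeup(dev))
		enable_irq_wake(ed->irq);
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_dev *ed = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(ed->irq);
	return 0;
}

/*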
*/ bool can_request_irq(unsigned int irq, unsigned long irqflags) { scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { struct irq_desc *desc = scoped_irqdesc; if (irq_settings_can_request(desc)) { if (!desc->action || irqflags & desc->action->flags & IRQF_SHARED) return true; } } return false; } int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) { struct irq_chip *chip = desc->irq_data.chip; int ret, unmask = 0; if (!chip || !chip->irq_set_type) { /* * IRQF_TRIGGER_* but the PIC does not support multiple * flow-types? */ pr_debug("No set_type function for IRQ %d (%s)\n", irq_desc_get_irq(desc), chip ? (chip->name ? : "unknown") : "unknown"); return 0; } if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { if (!irqd_irq_masked(&desc->irq_data)) mask_irq(desc); if (!irqd_irq_disabled(&desc->irq_data)) unmask = 1; } /* Mask all flags except trigger mode */ flags &= IRQ_TYPE_SENSE_MASK; ret = chip->irq_set_type(&desc->irq_data, flags); switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); irqd_set(&desc->irq_data, flags); fallthrough; case IRQ_SET_MASK_OK_NOCOPY: flags = irqd_get_trigger_type(&desc->irq_data); irq_settings_set_trigger_mask(desc, flags); irqd_clear(&desc->irq_data, IRQD_LEVEL); irq_settings_clr_level(desc); if (flags & IRQ_TYPE_LEVEL_MASK) { irq_settings_set_level(desc); irqd_set(&desc->irq_data, IRQD_LEVEL); } ret = 0; break; default: pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n", flags, irq_desc_get_irq(desc), chip->irq_set_type); } if (unmask) unmask_irq(desc); return ret; } #ifdef CONFIG_HARDIRQS_SW_RESEND int irq_set_parent(int irq, int parent_irq) { scoped_irqdesc_get_and_lock(irq, 0) { scoped_irqdesc->parent_irq = parent_irq; return 0; } return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_parent); #endif /* * Default primary interrupt handler for threaded interrupts. Is * assigned as primary handler when request_threaded_irq is called * with handler == NULL. Useful for oneshot interrupts. */ static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) { return IRQ_WAKE_THREAD; } /* * Primary handler for nested threaded interrupts. Should never be * called. */ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) { WARN(1, "Primary handler called for nested irq %d\n", irq); return IRQ_NONE; } static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id) { WARN(1, "Secondary action handler called for irq %d\n", irq); return IRQ_NONE; } #ifdef CONFIG_SMP /* * Check whether we need to change the affinity of the interrupt thread. 
*/ static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { cpumask_var_t mask; if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) return; __set_current_state(TASK_RUNNING); /* * In case we are out of memory we set IRQTF_AFFINITY again and * try again next time */ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { set_bit(IRQTF_AFFINITY, &action->thread_flags); return; } scoped_guard(raw_spinlock_irq, &desc->lock) { const struct cpumask *m; m = irq_data_get_effective_affinity_mask(&desc->irq_data); cpumask_copy(mask, m); } set_cpus_allowed_ptr(current, mask); free_cpumask_var(mask); } #else static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } #endif static int irq_wait_for_interrupt(struct irq_desc *desc, struct irqaction *action) { for (;;) { set_current_state(TASK_INTERRUPTIBLE); irq_thread_check_affinity(desc, action); if (kthread_should_stop()) { /* may need to run one last time */ if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) { __set_current_state(TASK_RUNNING); return 0; } __set_current_state(TASK_RUNNING); return -1; } if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) { __set_current_state(TASK_RUNNING); return 0; } schedule(); } } /* * Oneshot interrupts keep the irq line masked until the threaded * handler finished. unmask if the interrupt has not been disabled and * is marked MASKED. */ static void irq_finalize_oneshot(struct irq_desc *desc, struct irqaction *action) { if (!(desc->istate & IRQS_ONESHOT) || action->handler == irq_forced_secondary_handler) return; again: chip_bus_lock(desc); raw_spin_lock_irq(&desc->lock); /* * Implausible though it may be we need to protect us against * the following scenario: * * The thread is faster done than the hard interrupt handler * on the other CPU. If we unmask the irq line then the * interrupt can come in again and masks the line, leaves due * to IRQS_INPROGRESS and the irq line is masked forever. * * This also serializes the state of shared oneshot handlers * versus "desc->threads_oneshot |= action->thread_mask;" in * irq_wake_thread(). See the comment there which explains the * serialization. */ if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(desc); cpu_relax(); goto again; } /* * Now check again, whether the thread should run. Otherwise * we would clear the threads_oneshot bit of this thread which * was just set. */ if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) goto out_unlock; desc->threads_oneshot &= ~action->thread_mask; if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data)) unmask_threaded_irq(desc); out_unlock: raw_spin_unlock_irq(&desc->lock); chip_bus_sync_unlock(desc); } /* * Interrupts explicitly requested as threaded interrupts want to be * preemptible - many of them need to sleep and wait for slow busses to * complete. */ static irqreturn_t irq_thread_fn(struct irq_desc *desc, struct irqaction *action) { irqreturn_t ret = action->thread_fn(action->irq, action->dev_id); if (ret == IRQ_HANDLED) atomic_inc(&desc->threads_handled); irq_finalize_oneshot(desc, action); return ret; } /* * Interrupts which are not explicitly requested as threaded * interrupts rely on the implicit bh/preempt disable of the hard irq * context. So we need to disable bh here to avoid deadlocks and other * side effects. 
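 */

/*
 * Hedged example tying the oneshot machinery above together (it
 * illustrates threaded requests, not the forced-thread helper that
 * follows): with a NULL primary handler the core installs
 * irq_default_primary_handler(), so IRQF_ONESHOT is mandatory and the
 * line stays masked until the thread below returns. example_* names
 * are hypothetical.
 */
static irqreturn_t example_oneshot_thread(int irq, void *dev_id)
{
	/* May sleep; a level interrupt cannot re-fire while this runs. */
	return IRQ_HANDLED;
}

static int example_request_oneshot(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, NULL, example_oneshot_thread,
				    IRQF_ONESHOT, "example-oneshot", dev_id);
}

/*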
*/ static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) { irqreturn_t ret; local_bh_disable(); if (!IS_ENABLED(CONFIG_PREEMPT_RT)) local_irq_disable(); ret = irq_thread_fn(desc, action); if (!IS_ENABLED(CONFIG_PREEMPT_RT)) local_irq_enable(); local_bh_enable(); return ret; } void wake_threads_waitq(struct irq_desc *desc) { if (atomic_dec_and_test(&desc->threads_active)) wake_up(&desc->wait_for_threads); } static void irq_thread_dtor(struct callback_head *unused) { struct task_struct *tsk = current; struct irq_desc *desc; struct irqaction *action; if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) return; action = kthread_data(tsk); pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", tsk->comm, tsk->pid, action->irq); desc = irq_to_desc(action->irq); /* * If IRQTF_RUNTHREAD is set, we need to decrement * desc->threads_active and wake possible waiters. */ if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)) wake_threads_waitq(desc); /* Prevent a stale desc->threads_oneshot */ irq_finalize_oneshot(desc, action); } static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) { struct irqaction *secondary = action->secondary; if (WARN_ON_ONCE(!secondary)) return; guard(raw_spinlock_irq)(&desc->lock); __irq_wake_thread(desc, secondary); } /* * Internal function to notify that an interrupt thread is ready. */ static void irq_thread_set_ready(struct irq_desc *desc, struct irqaction *action) { set_bit(IRQTF_READY, &action->thread_flags); wake_up(&desc->wait_for_threads); } /* * Internal function to wake up an interrupt thread and wait until it is * ready. */ static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, struct irqaction *action) { if (!action || !action->thread) return; wake_up_process(action->thread); wait_event(desc->wait_for_threads, test_bit(IRQTF_READY, &action->thread_flags)); } /* * Interrupt handler thread */ static int irq_thread(void *data) { struct callback_head on_exit_work; struct irqaction *action = data; struct irq_desc *desc = irq_to_desc(action->irq); irqreturn_t (*handler_fn)(struct irq_desc *desc, struct irqaction *action); irq_thread_set_ready(desc, action); if (action->handler == irq_forced_secondary_handler) sched_set_fifo_secondary(current); else sched_set_fifo(current); if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD, &action->thread_flags)) handler_fn = irq_forced_thread_fn; else handler_fn = irq_thread_fn; init_task_work(&on_exit_work, irq_thread_dtor); task_work_add(current, &on_exit_work, TWA_NONE); while (!irq_wait_for_interrupt(desc, action)) { irqreturn_t action_ret; action_ret = handler_fn(desc, action); if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); wake_threads_waitq(desc); } /* * This is the regular exit path. __free_irq() is stopping the * thread via kthread_stop() after calling * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the * oneshot mask bit can be set.
*/ task_work_cancel_func(current, irq_thread_dtor); return 0; } /** * irq_wake_thread - wake the irq thread for the action identified by dev_id * @irq: Interrupt line * @dev_id: Device identity for which the thread should be woken */ void irq_wake_thread(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return; guard(raw_spinlock_irqsave)(&desc->lock); for_each_action_of_desc(desc, action) { if (action->dev_id == dev_id) { if (action->thread) __irq_wake_thread(desc, action); break; } } } EXPORT_SYMBOL_GPL(irq_wake_thread); static int irq_setup_forced_threading(struct irqaction *new) { if (!force_irqthreads()) return 0; if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) return 0; /* * No further action required for interrupts which are requested as * threaded interrupts already */ if (new->handler == irq_default_primary_handler) return 0; new->flags |= IRQF_ONESHOT; /* * Handle the case where we have a real primary handler and a * thread handler. We force thread them as well by creating a * secondary action. */ if (new->handler && new->thread_fn) { /* Allocate the secondary action */ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!new->secondary) return -ENOMEM; new->secondary->handler = irq_forced_secondary_handler; new->secondary->thread_fn = new->thread_fn; new->secondary->dev_id = new->dev_id; new->secondary->irq = new->irq; new->secondary->name = new->name; } /* Deal with the primary handler */ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); new->thread_fn = new->handler; new->handler = irq_default_primary_handler; return 0; } static int irq_request_resources(struct irq_desc *desc) { struct irq_data *d = &desc->irq_data; struct irq_chip *c = d->chip; return c->irq_request_resources ? c->irq_request_resources(d) : 0; } static void irq_release_resources(struct irq_desc *desc) { struct irq_data *d = &desc->irq_data; struct irq_chip *c = d->chip; if (c->irq_release_resources) c->irq_release_resources(d); } static bool irq_supports_nmi(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* Only IRQs directly managed by the root irqchip can be set as NMI */ if (d->parent_data) return false; #endif /* Don't support NMIs for chips behind a slow bus */ if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) return false; return d->chip->flags & IRQCHIP_SUPPORTS_NMI; } static int irq_nmi_setup(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); struct irq_chip *c = d->chip; return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL; } static void irq_nmi_teardown(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); struct irq_chip *c = d->chip; if (c->irq_nmi_teardown) c->irq_nmi_teardown(d); } static int setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) { struct task_struct *t; if (!secondary) { t = kthread_create(irq_thread, new, "irq/%d-%s", irq, new->name); } else { t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq, new->name); } if (IS_ERR(t)) return PTR_ERR(t); /* * We keep the reference to the task struct even if * the thread dies to avoid that the interrupt code * references an already freed task_struct. */ new->thread = get_task_struct(t); /* * The affinity can not be established yet, but it will be once the * interrupt is enabled. Delay and defer the actual setting to the * thread itself once it is ready to run. 
In the meantime, prevent * it from ever being re-affined directly by cpuset or * housekeeping. The proper way to do it is to re-affine the whole * vector. */ kthread_bind_mask(t, cpu_possible_mask); /* * Ensure the thread adjusts the affinity once it reaches the * thread function. */ set_bit(IRQTF_AFFINITY, &new->thread_flags); return 0; } static bool valid_percpu_irqaction(struct irqaction *old, struct irqaction *new) { do { if (cpumask_intersects(old->affinity, new->affinity) || old->percpu_dev_id == new->percpu_dev_id) return false; old = old->next; } while (old); return true; } /* * Internal function to register an irqaction - typically used to * allocate special interrupts that are part of the architecture. * * Locking rules: * * desc->request_mutex Provides serialization against a concurrent free_irq() * chip_bus_lock Provides serialization for slow bus operations * desc->lock Provides serialization against hard interrupts * * chip_bus_lock and desc->lock are sufficient for all other management and * interrupt related functions. desc->request_mutex solely serializes * request/free_irq(). */ static int __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) { struct irqaction *old, **old_ptr; unsigned long flags, thread_mask = 0; int ret, nested, shared = 0; bool per_cpu_devid; if (!desc) return -EINVAL; if (desc->irq_data.chip == &no_irq_chip) return -ENOSYS; if (!try_module_get(desc->owner)) return -ENODEV; per_cpu_devid = irq_settings_is_per_cpu_devid(desc); new->irq = irq; /* * If the trigger type is not specified by the caller, * then use the default for this interrupt. */ if (!(new->flags & IRQF_TRIGGER_MASK)) new->flags |= irqd_get_trigger_type(&desc->irq_data); /* * Check whether the interrupt nests into another interrupt * thread. */ nested = irq_settings_is_nested_thread(desc); if (nested) { if (!new->thread_fn) { ret = -EINVAL; goto out_mput; } /* * Replace the primary handler which was provided from * the driver for non nested interrupt handling by the * dummy function which warns when called. */ new->handler = irq_nested_primary_handler; } else { if (irq_settings_can_thread(desc)) { ret = irq_setup_forced_threading(new); if (ret) goto out_mput; } } /* * Create a handler thread when a thread function is supplied * and the interrupt does not nest into another interrupt * thread. */ if (new->thread_fn && !nested) { ret = setup_irq_thread(new, irq, false); if (ret) goto out_mput; if (new->secondary) { ret = setup_irq_thread(new->secondary, irq, true); if (ret) goto out_thread; } } /* * Drivers are often written to work w/o knowledge about the * underlying irq chip implementation, so a request for a * threaded irq without a primary hard irq context handler * requires the ONESHOT flag to be set. Some irq chips like * MSI based interrupts are per se one shot safe. Check the * chip flags, so we can avoid the unmask dance at the end of * the threaded handler for those. */ if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) new->flags &= ~IRQF_ONESHOT; /* * Protects against a concurrent __free_irq() call which might wait * for synchronize_hardirq() to complete without holding the optional * chip bus lock and desc->lock. Also protects against handing out * a recycled oneshot thread_mask bit while it's still in use by * its previous owner. 
*/ mutex_lock(&desc->request_mutex); /* * Acquire bus lock as the irq_request_resources() callback below * might rely on the serialization or the magic power management * functions which are abusing the irq_bus_lock() callback. */ chip_bus_lock(desc); /* First installed action requests resources. */ if (!desc->action) { ret = irq_request_resources(desc); if (ret) { pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n", new->name, irq, desc->irq_data.chip->name); goto out_bus_unlock; } } /* * The following block of code has to be executed atomically * protected against a concurrent interrupt and any of the other * management calls which are not serialized via * desc->request_mutex or the optional bus lock. */ raw_spin_lock_irqsave(&desc->lock, flags); old_ptr = &desc->action; old = *old_ptr; if (old) { /* * Can't share interrupts unless both agree to and are * the same type (level, edge, polarity). So both flag * fields must have IRQF_SHARED set and the bits which * set the trigger type must match. Also all must * agree on ONESHOT. * Interrupt lines used for NMIs cannot be shared. */ unsigned int oldtype; if (irq_is_nmi(desc) && !per_cpu_devid) { pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", new->name, irq, desc->irq_data.chip->name); ret = -EINVAL; goto out_unlock; } if (per_cpu_devid && !valid_percpu_irqaction(old, new)) { pr_err("Overlapping affinities for %s (irq %d) on irqchip %s.\n", new->name, irq, desc->irq_data.chip->name); ret = -EINVAL; goto out_unlock; } /* * If nobody did set the configuration before, inherit * the one provided by the requester. */ if (irqd_trigger_type_was_set(&desc->irq_data)) { oldtype = irqd_get_trigger_type(&desc->irq_data); } else { oldtype = new->flags & IRQF_TRIGGER_MASK; irqd_set_trigger_type(&desc->irq_data, oldtype); } if (!((old->flags & new->flags) & IRQF_SHARED) || (oldtype != (new->flags & IRQF_TRIGGER_MASK))) goto mismatch; if ((old->flags & IRQF_ONESHOT) && (new->flags & IRQF_COND_ONESHOT)) new->flags |= IRQF_ONESHOT; else if ((old->flags ^ new->flags) & IRQF_ONESHOT) goto mismatch; /* All handlers must agree on per-cpuness */ if ((old->flags & IRQF_PERCPU) != (new->flags & IRQF_PERCPU)) goto mismatch; /* add new interrupt at end of irq queue */ do { /* * Or all existing action->thread_mask bits, * so we can find the next zero bit for this * new action. */ thread_mask |= old->thread_mask; old_ptr = &old->next; old = *old_ptr; } while (old); shared = 1; } /* * Setup the thread mask for this irqaction for ONESHOT. For * !ONESHOT irqs the thread mask is 0 so we can avoid a * conditional in irq_wake_thread(). */ if (new->flags & IRQF_ONESHOT) { /* * Unlikely to have 32 or 64 irqs sharing one line, * but who knows. */ if (thread_mask == ~0UL) { ret = -EBUSY; goto out_unlock; } /* * The thread_mask for the action is or'ed to * desc->threads_active to indicate that the * IRQF_ONESHOT thread handler has been woken, but not * yet finished. The bit is cleared when a thread * completes. When all threads of a shared interrupt * line have completed desc->threads_active becomes * zero and the interrupt line is unmasked. See * handle.c:irq_wake_thread() for further information. * * If no thread is woken by primary (hard irq context) * interrupt handlers, then desc->threads_active is * also checked for zero to unmask the irq line in the * affected hard irq flow handlers * (handle_[fasteoi|level]_irq). * * The new action gets the first zero bit of * thread_mask assigned.
See the loop above which or's * all existing action->thread_mask bits. */ new->thread_mask = 1UL << ffz(thread_mask); } else if (new->handler == irq_default_primary_handler && !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { /* * The interrupt was requested with handler = NULL, so * we use the default primary handler for it. But it * does not have the oneshot flag set. In combination * with level interrupts this is deadly, because the * default primary handler just wakes the thread, then * the irq line is re-enabled, but the device still * has the level irq asserted. Rinse and repeat.... * * While this works for edge type interrupts, we play * it safe and reject unconditionally because we can't * say for sure which type this interrupt really * has. The type flags are unreliable as the * underlying chip implementation can override them. */ pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n", new->name, irq); ret = -EINVAL; goto out_unlock; } if (!shared) { /* Setup the type (level, edge, polarity) if configured: */ if (new->flags & IRQF_TRIGGER_MASK) { ret = __irq_set_trigger(desc, new->flags & IRQF_TRIGGER_MASK); if (ret) goto out_unlock; } /* * Activate the interrupt. That activation must happen * independently of IRQ_NOAUTOEN. request_irq() can fail * and the callers are supposed to handle * that. enable_irq() of an interrupt requested with * IRQ_NOAUTOEN is not supposed to fail. The activation * keeps it in shutdown mode, it merely associates * resources if necessary and if that's not possible it * fails. Interrupts which are in managed shutdown mode * will simply ignore that activation request. */ ret = irq_activate(desc); if (ret) goto out_unlock; desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | IRQS_ONESHOT | IRQS_WAITING); irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); if (new->flags & IRQF_PERCPU) { irqd_set(&desc->irq_data, IRQD_PER_CPU); irq_settings_set_per_cpu(desc); if (new->flags & IRQF_NO_DEBUG) irq_settings_set_no_debug(desc); } if (noirqdebug) irq_settings_set_no_debug(desc); if (new->flags & IRQF_ONESHOT) desc->istate |= IRQS_ONESHOT; /* Exclude IRQ from balancing if requested */ if (new->flags & IRQF_NOBALANCING) { irq_settings_set_no_balancing(desc); irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } if (!(new->flags & IRQF_NO_AUTOEN) && irq_settings_can_autoenable(desc)) { irq_startup(desc, IRQ_RESEND, IRQ_START_COND); } else if (!per_cpu_devid) { /* * Shared interrupts do not go well with disabling * auto enable. The sharing interrupt might request * it while it's still disabled and then wait for * interrupts forever. */ WARN_ON_ONCE(new->flags & IRQF_SHARED); /* Undo nested disables: */ desc->depth = 1; } } else if (new->flags & IRQF_TRIGGER_MASK) { unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); if (nmsk != omsk) /* hope the handler works with current trigger mode */ pr_warn("irq %d uses trigger mode %u; requested %u\n", irq, omsk, nmsk); } *old_ptr = new; irq_pm_install_action(desc, new); /* Reset broken irq detection when installing new handler */ desc->irq_count = 0; desc->irqs_unhandled = 0; /* * Check whether we disabled the irq via the spurious handler * before. Reenable it and give it another chance.
*/ if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { desc->istate &= ~IRQS_SPURIOUS_DISABLED; __enable_irq(desc); } raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(desc); mutex_unlock(&desc->request_mutex); irq_setup_timings(desc, new); wake_up_and_wait_for_irq_thread_ready(desc, new); wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); register_irq_proc(irq, desc); new->dir = NULL; register_handler_proc(irq, new); return 0; mismatch: if (!(new->flags & IRQF_PROBE_SHARED)) { pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", irq, new->flags, new->name, old->flags, old->name); #ifdef CONFIG_DEBUG_SHIRQ dump_stack(); #endif } ret = -EBUSY; out_unlock: raw_spin_unlock_irqrestore(&desc->lock, flags); if (!desc->action) irq_release_resources(desc); out_bus_unlock: chip_bus_sync_unlock(desc); mutex_unlock(&desc->request_mutex); out_thread: if (new->thread) { struct task_struct *t = new->thread; new->thread = NULL; kthread_stop_put(t); } if (new->secondary && new->secondary->thread) { struct task_struct *t = new->secondary->thread; new->secondary->thread = NULL; kthread_stop_put(t); } out_mput: module_put(desc->owner); return ret; } /* * Internal function to unregister an irqaction - used to free * regular and special interrupts that are part of the architecture. */ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) { unsigned irq = desc->irq_data.irq; struct irqaction *action, **action_ptr; unsigned long flags; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); mutex_lock(&desc->request_mutex); chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, flags); /* * There can be multiple actions per IRQ descriptor, find the right * one based on the dev_id: */ action_ptr = &desc->action; for (;;) { action = *action_ptr; if (!action) { WARN(1, "Trying to free already-free IRQ %d\n", irq); raw_spin_unlock_irqrestore(&desc->lock, flags); chip_bus_sync_unlock(desc); mutex_unlock(&desc->request_mutex); return NULL; } if (action->dev_id == dev_id) break; action_ptr = &action->next; } /* Found it - now remove it from the list of entries: */ *action_ptr = action->next; irq_pm_remove_action(desc, action); /* If this was the last handler, shut down the IRQ line: */ if (!desc->action) { irq_settings_clr_disable_unlazy(desc); /* Only shutdown. Deactivate after synchronize_hardirq() */ irq_shutdown(desc); } #ifdef CONFIG_SMP /* make sure affinity_hint is cleaned up */ if (WARN_ON_ONCE(desc->affinity_hint)) desc->affinity_hint = NULL; #endif raw_spin_unlock_irqrestore(&desc->lock, flags); /* * Drop bus_lock here so the changes which were done in the chip * callbacks above are synced out to the irq chips which hang * behind a slow bus (I2C, SPI) before calling synchronize_hardirq(). * * Aside from that, the bus_lock can also be taken from the threaded * handler in irq_finalize_oneshot() which results in a deadlock * because kthread_stop() would wait forever for the thread to * complete, which is blocked on the bus lock. * * The still held desc->request_mutex protects against a * concurrent request_irq() of this irq so the release of resources * and timing data is properly serialized. */ chip_bus_sync_unlock(desc); unregister_handler_proc(irq, action); /* * Make sure it's not being used on another CPU and if the chip * supports it also make sure that there is no (not yet serviced) * interrupt in flight at the hardware level.
*/ __synchronize_irq(desc); #ifdef CONFIG_DEBUG_SHIRQ /* * It's a shared IRQ -- the driver ought to be prepared for an IRQ * event to happen even now it's being freed, so let's make sure that * is so by doing an extra call to the handler .... * * ( We do this after actually deregistering it, to make sure that a * 'real' IRQ doesn't run in parallel with our fake. ) */ if (action->flags & IRQF_SHARED) { local_irq_save(flags); action->handler(irq, dev_id); local_irq_restore(flags); } #endif /* * The action has already been removed above, but the thread writes * its oneshot mask bit when it completes. Though request_mutex is * held across this which prevents __setup_irq() from handing out * the same bit to a newly requested action. */ if (action->thread) { kthread_stop_put(action->thread); if (action->secondary && action->secondary->thread) kthread_stop_put(action->secondary->thread); } /* Last action releases resources */ if (!desc->action) { /* * Reacquire bus lock as irq_release_resources() might * require it to deallocate resources over the slow bus. */ chip_bus_lock(desc); /* * There is no interrupt on the fly anymore. Deactivate it * completely. */ scoped_guard(raw_spinlock_irqsave, &desc->lock) irq_domain_deactivate_irq(&desc->irq_data); irq_release_resources(desc); chip_bus_sync_unlock(desc); irq_remove_timings(desc); } mutex_unlock(&desc->request_mutex); irq_chip_pm_put(&desc->irq_data); module_put(desc->owner); kfree(action->secondary); return action; } /** * free_irq - free an interrupt allocated with request_irq * @irq: Interrupt line to free * @dev_id: Device identity to free * * Remove an interrupt handler. The handler is removed and if the interrupt * line is no longer in use by any driver it is disabled. On a shared IRQ * the caller must ensure the interrupt is disabled on the card it drives * before calling this function. The function does not return until any * executing interrupts for this IRQ have completed. * * This function must not be called from interrupt context. * * Returns the devname argument passed to request_irq. 
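 */

/*
 * Hedged teardown sketch for free_irq() below: on a shared line the
 * device must be silenced first, and the returned devname can be
 * reclaimed if the caller originally passed a dynamically allocated
 * name. All example_* names are hypothetical.
 */
static void example_quiesce(void *dev_id)
{
	/* hypothetical: mask the interrupt source at the device itself */
}

static void example_teardown(unsigned int irq, void *dev_id)
{
	const void *devname;

	example_quiesce(dev_id);
	devname = free_irq(irq, dev_id);	/* waits for running handlers */
	kfree(devname);				/* only if devname was kmalloc()ed */
}

/*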
*/ const void *free_irq(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; const char *devname; if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return NULL; #ifdef CONFIG_SMP if (WARN_ON(desc->affinity_notify)) desc->affinity_notify = NULL; #endif action = __free_irq(desc, dev_id); if (!action) return NULL; devname = action->name; kfree(action); return devname; } EXPORT_SYMBOL(free_irq); /* This function must be called with desc->lock held */ static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) { const char *devname = NULL; desc->istate &= ~IRQS_NMI; if (!WARN_ON(desc->action == NULL)) { irq_pm_remove_action(desc, desc->action); devname = desc->action->name; unregister_handler_proc(irq, desc->action); kfree(desc->action); desc->action = NULL; } irq_settings_clr_disable_unlazy(desc); irq_shutdown_and_deactivate(desc); irq_release_resources(desc); irq_chip_pm_put(&desc->irq_data); module_put(desc->owner); return devname; } const void *free_nmi(unsigned int irq, void *dev_id) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || WARN_ON(!irq_is_nmi(desc))) return NULL; if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) return NULL; /* NMI still enabled */ if (WARN_ON(desc->depth == 0)) disable_nmi_nosync(irq); guard(raw_spinlock_irqsave)(&desc->lock); irq_nmi_teardown(desc); return __cleanup_nmi(irq, desc); } /** * request_threaded_irq - allocate an interrupt line * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. * Primary handler for threaded interrupts. * If handler is NULL and thread_fn != NULL * the default primary handler is installed. * @thread_fn: Function called from the irq handler thread * If NULL, no irq thread is created * @irqflags: Interrupt type flags * @devname: An ascii name for the claiming device * @dev_id: A cookie passed back to the handler function * * This call allocates interrupt resources and enables the interrupt line * and IRQ handling. From the point this call is made your handler function * may be invoked. Since your handler function must clear any interrupt the * board raises, you must take care both to initialise your hardware and to * set up the interrupt handler in the right order. * * If you want to set up a threaded irq handler for your device then you * need to supply @handler and @thread_fn. @handler is still called in hard * interrupt context and has to check whether the interrupt originates from * the device. If yes it needs to disable the interrupt on the device and * return IRQ_WAKE_THREAD which will wake up the handler thread and run * @thread_fn. This split handler design is necessary to support shared * interrupts. * * @dev_id must be globally unique. Normally the address of the device data * structure is used as the cookie. Since the handler receives this value * it makes sense to use it. * * If your interrupt is shared you must pass a non NULL dev_id as this is * required when freeing the interrupt. 
* * Flags: * * IRQF_SHARED Interrupt is shared * IRQF_TRIGGER_* Specify active edge(s) or level * IRQF_ONESHOT Run thread_fn with interrupt line masked */ int request_threaded_irq(unsigned int irq, irq_handler_t handler, irq_handler_t thread_fn, unsigned long irqflags, const char *devname, void *dev_id) { struct irqaction *action; struct irq_desc *desc; int retval; if (irq == IRQ_NOTCONNECTED) return -ENOTCONN; /* * Sanity-check: shared interrupts must pass in a real dev-ID, * otherwise we'll have trouble later trying to figure out * which interrupt is which (messes up the interrupt freeing * logic etc). * * Also shared interrupts do not go well with disabling auto enable. * The sharing interrupt might request it while it's still disabled * and then wait for interrupts forever. * * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and * it cannot be set along with IRQF_NO_SUSPEND. */ if (((irqflags & IRQF_SHARED) && !dev_id) || ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) || (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) return -EINVAL; desc = irq_to_desc(irq); if (!desc) return -EINVAL; if (!irq_settings_can_request(desc) || WARN_ON(irq_settings_is_per_cpu_devid(desc))) return -EINVAL; if (!handler) { if (!thread_fn) return -EINVAL; handler = irq_default_primary_handler; } action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return -ENOMEM; action->handler = handler; action->thread_fn = thread_fn; action->flags = irqflags; action->name = devname; action->dev_id = dev_id; retval = irq_chip_pm_get(&desc->irq_data); if (retval < 0) { kfree(action); return retval; } retval = __setup_irq(irq, desc, action); if (retval) { irq_chip_pm_put(&desc->irq_data); kfree(action->secondary); kfree(action); } #ifdef CONFIG_DEBUG_SHIRQ_FIXME if (!retval && (irqflags & IRQF_SHARED)) { /* * It's a shared IRQ -- the driver ought to be prepared for it * to happen immediately, so let's make sure.... * We disable the irq to make sure that a 'real' IRQ doesn't * run in parallel with our fake. */ unsigned long flags; disable_irq(irq); local_irq_save(flags); handler(irq, dev_id); local_irq_restore(flags); enable_irq(irq); } #endif return retval; } EXPORT_SYMBOL(request_threaded_irq); /** * request_any_context_irq - allocate an interrupt line * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. * Threaded handler for threaded interrupts. * @flags: Interrupt type flags * @name: An ascii name for the claiming device * @dev_id: A cookie passed back to the handler function * * This call allocates interrupt resources and enables the interrupt line * and IRQ handling. It selects either a hardirq or threaded handling * method depending on the context. * * Returns: On failure, it returns a negative value. On success, it returns either * IRQC_IS_HARDIRQ or IRQC_IS_NESTED. */ int request_any_context_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *name, void *dev_id) { struct irq_desc *desc; int ret; if (irq == IRQ_NOTCONNECTED) return -ENOTCONN; desc = irq_to_desc(irq); if (!desc) return -EINVAL; if (irq_settings_is_nested_thread(desc)) { ret = request_threaded_irq(irq, NULL, handler, flags, name, dev_id); return !ret ? IRQC_IS_NESTED : ret; } ret = request_irq(irq, handler, flags, name, dev_id); return !ret ? 
IRQC_IS_HARDIRQ : ret; } EXPORT_SYMBOL_GPL(request_any_context_irq); /** * request_nmi - allocate an interrupt line for NMI delivery * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. * Threaded handler for threaded interrupts. * @irqflags: Interrupt type flags * @name: An ascii name for the claiming device * @dev_id: A cookie passed back to the handler function * * This call allocates interrupt resources and enables the interrupt line * and IRQ handling. It sets up the IRQ line to be handled as an NMI. * * An interrupt line delivering NMIs cannot be shared and IRQ handling * cannot be threaded. * * Interrupt lines requested for NMI delivering must produce per cpu * interrupts and have auto enabling setting disabled. * * @dev_id must be globally unique. Normally the address of the device data * structure is used as the cookie. Since the handler receives this value * it makes sense to use it. * * If the interrupt line cannot be used to deliver NMIs, function will fail * and return a negative value. */ int request_nmi(unsigned int irq, irq_handler_t handler, unsigned long irqflags, const char *name, void *dev_id) { struct irqaction *action; struct irq_desc *desc; int retval; if (irq == IRQ_NOTCONNECTED) return -ENOTCONN; /* NMI cannot be shared, used for Polling */ if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL)) return -EINVAL; if (!(irqflags & IRQF_PERCPU)) return -EINVAL; if (!handler) return -EINVAL; desc = irq_to_desc(irq); if (!desc || (irq_settings_can_autoenable(desc) && !(irqflags & IRQF_NO_AUTOEN)) || !irq_settings_can_request(desc) || WARN_ON(irq_settings_is_per_cpu_devid(desc)) || !irq_supports_nmi(desc)) return -EINVAL; action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return -ENOMEM; action->handler = handler; action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING; action->name = name; action->dev_id = dev_id; retval = irq_chip_pm_get(&desc->irq_data); if (retval < 0) goto err_out; retval = __setup_irq(irq, desc, action); if (retval) goto err_irq_setup; scoped_guard(raw_spinlock_irqsave, &desc->lock) { /* Setup NMI state */ desc->istate |= IRQS_NMI; retval = irq_nmi_setup(desc); if (retval) { __cleanup_nmi(irq, desc); return -EINVAL; } return 0; } err_irq_setup: irq_chip_pm_put(&desc->irq_data); err_out: kfree(action); return retval; } void enable_percpu_irq(unsigned int irq, unsigned int type) { scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { struct irq_desc *desc = scoped_irqdesc; /* * If the trigger type is not specified by the caller, then * use the default for this interrupt. */ type &= IRQ_TYPE_SENSE_MASK; if (type == IRQ_TYPE_NONE) type = irqd_get_trigger_type(&desc->irq_data); if (type != IRQ_TYPE_NONE) { if (__irq_set_trigger(desc, type)) { WARN(1, "failed to set type for IRQ%d\n", irq); return; } } irq_percpu_enable(desc, smp_processor_id()); } } EXPORT_SYMBOL_GPL(enable_percpu_irq); void enable_percpu_nmi(unsigned int irq, unsigned int type) { enable_percpu_irq(irq, type); } /** * irq_percpu_is_enabled - Check whether the per cpu irq is enabled * @irq: Linux irq number to check for * * Must be called from a non migratable context. Returns the enable * state of a per cpu interrupt on the current cpu. 
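 */

/*
 * Hedged sketch for irq_percpu_is_enabled() below: run the check on
 * each CPU (so it is not migratable) and enable the per-CPU line
 * where it is still off. The helper name is hypothetical.
 */
static void example_enable_on_this_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	if (!irq_percpu_is_enabled(irq))
		enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

/* Typical call site: on_each_cpu(example_enable_on_this_cpu, &irq, 1); */

/*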
*/ bool irq_percpu_is_enabled(unsigned int irq) { scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) return cpumask_test_cpu(smp_processor_id(), scoped_irqdesc->percpu_enabled); return false; } EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); void disable_percpu_irq(unsigned int irq) { scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) irq_percpu_disable(scoped_irqdesc, smp_processor_id()); } EXPORT_SYMBOL_GPL(disable_percpu_irq); void disable_percpu_nmi(unsigned int irq) { disable_percpu_irq(irq); } /* * Internal function to unregister a percpu irqaction. */ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action, **action_ptr; WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); if (!desc) return NULL; scoped_guard(raw_spinlock_irqsave, &desc->lock) { action_ptr = &desc->action; for (;;) { action = *action_ptr; if (!action) { WARN(1, "Trying to free already-free IRQ %d\n", irq); return NULL; } if (action->percpu_dev_id == dev_id) break; action_ptr = &action->next; } if (cpumask_intersects(desc->percpu_enabled, action->affinity)) { WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", irq, cpumask_first_and(desc->percpu_enabled, action->affinity)); return NULL; } /* Found it - now remove it from the list of entries: */ *action_ptr = action->next; /* Demote from NMI if we killed the last action */ if (!desc->action) desc->istate &= ~IRQS_NMI; } unregister_handler_proc(irq, action); irq_chip_pm_put(&desc->irq_data); module_put(desc->owner); return action; } /** * free_percpu_irq - free an interrupt allocated with request_percpu_irq * @irq: Interrupt line to free * @dev_id: Device identity to free * * Remove a percpu interrupt handler. The handler is removed, but the * interrupt line is not disabled. This must be done on each CPU before * calling this function. The function does not return until any executing * interrupts for this IRQ have completed. * * This function must not be called from interrupt context. */ void free_percpu_irq(unsigned int irq, void __percpu *dev_id) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !irq_settings_is_per_cpu_devid(desc)) return; chip_bus_lock(desc); kfree(__free_percpu_irq(irq, dev_id)); chip_bus_sync_unlock(desc); } EXPORT_SYMBOL_GPL(free_percpu_irq); void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !irq_settings_is_per_cpu_devid(desc)) return; if (WARN_ON(!irq_is_nmi(desc))) return; kfree(__free_percpu_irq(irq, dev_id)); } /** * setup_percpu_irq - setup a per-cpu interrupt * @irq: Interrupt line to setup * @act: irqaction for the interrupt * * Used to statically setup per-cpu interrupts in the early boot process. 
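 */

/*
 * Hedged sketch of the dynamic counterpart to setup_percpu_irq()
 * below: request a percpu-devid line once with a per-CPU cookie, then
 * let each CPU enable it for itself. All example_* names are
 * hypothetical.
 */
struct example_pcpu {
	int counter;
};

static DEFINE_PER_CPU(struct example_pcpu, example_pcpu_state);

static irqreturn_t example_percpu_handler(int irq, void *dev_id)
{
	struct example_pcpu *st = dev_id;	/* this CPU's instance */

	st->counter++;
	return IRQ_HANDLED;
}

static int example_request_percpu(unsigned int irq)
{
	/* Line stays disabled; each CPU calls enable_percpu_irq() itself. */
	return request_percpu_irq(irq, example_percpu_handler,
				  "example-percpu", &example_pcpu_state);
}

/*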
*/ int setup_percpu_irq(unsigned int irq, struct irqaction *act) { struct irq_desc *desc = irq_to_desc(irq); int retval; if (!desc || !irq_settings_is_per_cpu_devid(desc)) return -EINVAL; retval = irq_chip_pm_get(&desc->irq_data); if (retval < 0) return retval; if (!act->affinity) act->affinity = cpu_online_mask; retval = __setup_irq(irq, desc, act); if (retval) irq_chip_pm_put(&desc->irq_data); return retval; } static struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long flags, const char *devname, const cpumask_t *affinity, void __percpu *dev_id) { struct irqaction *action; if (!affinity) affinity = cpu_possible_mask; action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return NULL; action->handler = handler; action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND; action->name = devname; action->percpu_dev_id = dev_id; action->affinity = affinity; /* * We allow some form of sharing for non-overlapping affinity * masks. Obviously, covering all CPUs prevents any sharing in * the first place. */ if (!cpumask_equal(affinity, cpu_possible_mask)) action->flags |= IRQF_SHARED; return action; } /** * __request_percpu_irq - allocate a percpu interrupt line * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. * @flags: Interrupt type flags (IRQF_TIMER only) * @devname: An ascii name for the claiming device * @affinity: A cpumask describing the target CPUs for this interrupt * @dev_id: A percpu cookie passed back to the handler function * * This call allocates interrupt resources, but doesn't enable the interrupt * on any CPU, as all percpu-devid interrupts are flagged with IRQ_NOAUTOEN. * It has to be done on each CPU using enable_percpu_irq(). * * @dev_id must be globally unique. It is a per-cpu variable, and * the handler gets called with the interrupted CPU's instance of * that variable. */ int __request_percpu_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, const char *devname, const cpumask_t *affinity, void __percpu *dev_id) { struct irqaction *action; struct irq_desc *desc; int retval; if (!dev_id) return -EINVAL; desc = irq_to_desc(irq); if (!desc || !irq_settings_can_request(desc) || !irq_settings_is_per_cpu_devid(desc)) return -EINVAL; if (flags && flags != IRQF_TIMER) return -EINVAL; action = create_percpu_irqaction(handler, flags, devname, affinity, dev_id); if (!action) return -ENOMEM; retval = irq_chip_pm_get(&desc->irq_data); if (retval < 0) { kfree(action); return retval; } retval = __setup_irq(irq, desc, action); if (retval) { irq_chip_pm_put(&desc->irq_data); kfree(action); } return retval; } EXPORT_SYMBOL_GPL(__request_percpu_irq); /** * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery * @irq: Interrupt line to allocate * @handler: Function to be called when the IRQ occurs. * @name: An ascii name for the claiming device * @affinity: A cpumask describing the target CPUs for this interrupt * @dev_id: A percpu cookie passed back to the handler function * * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs * have to be setup on each CPU by calling prepare_percpu_nmi() before * being enabled on the same CPU by using enable_percpu_nmi(). * * @dev_id must be globally unique. It is a per-cpu variable, and the * handler gets called with the interrupted CPU's instance of that * variable. * * Interrupt lines requested for NMI delivering should have auto enabling * setting disabled. 
* * If the interrupt line cannot be used to deliver NMIs, function * will fail returning a negative value. */ int request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name, const struct cpumask *affinity, void __percpu *dev_id) { struct irqaction *action; struct irq_desc *desc; int retval; if (!handler) return -EINVAL; desc = irq_to_desc(irq); if (!desc || !irq_settings_can_request(desc) || !irq_settings_is_per_cpu_devid(desc) || irq_settings_can_autoenable(desc) || !irq_supports_nmi(desc)) return -EINVAL; /* The line cannot be NMI already if the new request covers all CPUs */ if (irq_is_nmi(desc) && (!affinity || cpumask_equal(affinity, cpu_possible_mask))) return -EINVAL; action = create_percpu_irqaction(handler, IRQF_NO_THREAD | IRQF_NOBALANCING, name, affinity, dev_id); if (!action) return -ENOMEM; retval = irq_chip_pm_get(&desc->irq_data); if (retval < 0) goto err_out; retval = __setup_irq(irq, desc, action); if (retval) goto err_irq_setup; scoped_guard(raw_spinlock_irqsave, &desc->lock) desc->istate |= IRQS_NMI; return 0; err_irq_setup: irq_chip_pm_put(&desc->irq_data); err_out: kfree(action); return retval; } /** * prepare_percpu_nmi - performs CPU local setup for NMI delivery * @irq: Interrupt line to prepare for NMI delivery * * This call prepares an interrupt line to deliver NMI on the current CPU, * before that interrupt line gets enabled with enable_percpu_nmi(). * * As a CPU local operation, this should be called from non-preemptible * context. * * If the interrupt line cannot be used to deliver NMIs, function will fail * returning a negative value. */ int prepare_percpu_nmi(unsigned int irq) { int ret = -EINVAL; WARN_ON(preemptible()); scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { if (WARN(!irq_is_nmi(scoped_irqdesc), "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", irq)) return -EINVAL; ret = irq_nmi_setup(scoped_irqdesc); if (ret) pr_err("Failed to setup NMI delivery: irq %u\n", irq); } return ret; } /** * teardown_percpu_nmi - undoes NMI setup of IRQ line * @irq: Interrupt line from which CPU local NMI configuration should be removed * * This call undoes the setup done by prepare_percpu_nmi(). * * IRQ line should not be enabled for the current CPU. * As a CPU local operation, this should be called from non-preemptible * context. */ void teardown_percpu_nmi(unsigned int irq) { WARN_ON(preemptible()); scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_PERCPU) { if (WARN_ON(!irq_is_nmi(scoped_irqdesc))) return; irq_nmi_teardown(scoped_irqdesc); } } static int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) { struct irq_chip *chip; int err = -EINVAL; do { chip = irq_data_get_irq_chip(data); if (WARN_ON_ONCE(!chip)) return -ENODEV; if (chip->irq_get_irqchip_state) break; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY data = data->parent_data; #else data = NULL; #endif } while (data); if (data) err = chip->irq_get_irqchip_state(data, which, state); return err; } /** * irq_get_irqchip_state - returns the irqchip state of a interrupt. * @irq: Interrupt line that is forwarded to a VM * @which: One of IRQCHIP_STATE_* the caller wants to know about * @state: a pointer to a boolean where the state is to be stored * * This call snapshots the internal irqchip state of an interrupt, * returning into @state the bit corresponding to stage @which * * This function should be called with preemption disabled if the interrupt * controller has per-cpu registers. 
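 */

/*
 * Hedged sketch for irq_get_irqchip_state() below, e.g. when saving
 * forwarded-interrupt state for a VM. The wrapper name is
 * hypothetical.
 */
static int example_line_is_pending(unsigned int irq, bool *pending)
{
	return irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
}

/*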
*/ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool *state) { scoped_irqdesc_get_and_buslock(irq, 0) { struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); return __irq_get_irqchip_state(data, which, state); } return -EINVAL; } EXPORT_SYMBOL_GPL(irq_get_irqchip_state); /** * irq_set_irqchip_state - set the state of a forwarded interrupt. * @irq: Interrupt line that is forwarded to a VM * @which: State to be restored (one of IRQCHIP_STATE_*) * @val: Value corresponding to @which * * This call sets the internal irqchip state of an interrupt, depending on * the value of @which. * * This function should be called with migration disabled if the interrupt * controller has per-cpu registers. */ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool val) { scoped_irqdesc_get_and_buslock(irq, 0) { struct irq_data *data = irq_desc_get_irq_data(scoped_irqdesc); struct irq_chip *chip; do { chip = irq_data_get_irq_chip(data); if (WARN_ON_ONCE(!chip)) return -ENODEV; if (chip->irq_set_irqchip_state) break; data = irqd_get_parent_data(data); } while (data); if (data) return chip->irq_set_irqchip_state(data, which, val); } return -EINVAL; } EXPORT_SYMBOL_GPL(irq_set_irqchip_state); /** * irq_has_action - Check whether an interrupt is requested * @irq: The linux irq number * * Returns: A snapshot of the current state */ bool irq_has_action(unsigned int irq) { bool res; rcu_read_lock(); res = irq_desc_has_action(irq_to_desc(irq)); rcu_read_unlock(); return res; } EXPORT_SYMBOL_GPL(irq_has_action); /** * irq_check_status_bit - Check whether bits in the irq descriptor status are set * @irq: The linux irq number * @bitmask: The bitmask to evaluate * * Returns: True if one of the bits in @bitmask is set */ bool irq_check_status_bit(unsigned int irq, unsigned int bitmask) { struct irq_desc *desc; bool res = false; rcu_read_lock(); desc = irq_to_desc(irq); if (desc) res = !!(desc->status_use_accessors & bitmask); rcu_read_unlock(); return res; } EXPORT_SYMBOL_GPL(irq_check_status_bit);
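
/*
 * Hedged closing example for the snapshot helpers above: skip lines
 * without an installed action when dumping diagnostic state. The
 * function and its scan bound are hypothetical.
 */
static void example_dump_requested(unsigned int max_irq)
{
	unsigned int i;

	for (i = 0; i < max_irq; i++) {
		if (irq_has_action(i))
			pr_info("irq %u has a registered handler\n", i);
	}
}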
// SPDX-License-Identifier: GPL-2.0-only /* * linux/net/sunrpc/svc.c * * High-level RPC service routines * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> * * Multiple threads pools and NUMAisation * Copyright (c) 2006 Silicon Graphics, Inc. * by Greg Banks <gnb@melbourne.sgi.com> */ #include <linux/linkage.h> #include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/net.h> #include <linux/in.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/bc_xprt.h> #include <trace/events/sunrpc.h> #include "fail.h" #include "sunrpc.h" #define RPCDBG_FACILITY RPCDBG_SVCDSP static void svc_unregister(const struct svc_serv *serv, struct net *net); #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL /* * Mode for mapping cpus to pools. */ enum { SVC_POOL_AUTO = -1, /* choose one of the others */ SVC_POOL_GLOBAL, /* no mapping, just a single global pool * (legacy & UP mode) */ SVC_POOL_PERCPU, /* one pool per cpu */ SVC_POOL_PERNODE /* one pool per numa node */ }; /* * Structure for mapping cpus to pools and vice versa. * Setup once during sunrpc initialisation. */ struct svc_pool_map { int count; /* How many svc_servs use us */ int mode; /* Note: int not enum to avoid * warnings about "enumeration value * not handled in switch" */ unsigned int npools; unsigned int *pool_to; /* maps pool id to cpu or node */ unsigned int *to_pool; /* maps cpu or node to pool id */ }; static struct svc_pool_map svc_pool_map = { .mode = SVC_POOL_DEFAULT }; static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */ static int __param_set_pool_mode(const char *val, struct svc_pool_map *m) { int err, mode; mutex_lock(&svc_pool_map_mutex); err = 0; if (!strncmp(val, "auto", 4)) mode = SVC_POOL_AUTO; else if (!strncmp(val, "global", 6)) mode = SVC_POOL_GLOBAL; else if (!strncmp(val, "percpu", 6)) mode = SVC_POOL_PERCPU; else if (!strncmp(val, "pernode", 7)) mode = SVC_POOL_PERNODE; else err = -EINVAL; if (err) goto out; if (m->count == 0) m->mode = mode; else if (mode != m->mode) err = -EBUSY; out: mutex_unlock(&svc_pool_map_mutex); return err; } static int param_set_pool_mode(const char *val, const struct kernel_param *kp) { struct svc_pool_map *m = kp->arg; return __param_set_pool_mode(val, m); } int sunrpc_set_pool_mode(const char *val) { return __param_set_pool_mode(val, &svc_pool_map); } EXPORT_SYMBOL(sunrpc_set_pool_mode); /** * sunrpc_get_pool_mode - get the current pool_mode for the host * @buf: where to write the current pool_mode * @size: size of @buf * * Grab the current pool_mode from the svc_pool_map and write * the resulting string to @buf. Returns the number of characters * written to @buf (a'la snprintf()).
*/ int sunrpc_get_pool_mode(char *buf, size_t size) { struct svc_pool_map *m = &svc_pool_map; switch (m->mode) { case SVC_POOL_AUTO: return snprintf(buf, size, "auto"); case SVC_POOL_GLOBAL: return snprintf(buf, size, "global"); case SVC_POOL_PERCPU: return snprintf(buf, size, "percpu"); case SVC_POOL_PERNODE: return snprintf(buf, size, "pernode"); default: return snprintf(buf, size, "%d", m->mode); } } EXPORT_SYMBOL(sunrpc_get_pool_mode); static int param_get_pool_mode(char *buf, const struct kernel_param *kp) { char str[16]; int len; len = sunrpc_get_pool_mode(str, ARRAY_SIZE(str)); /* Ensure we have room for newline and NUL */ len = min_t(int, len, ARRAY_SIZE(str) - 2); /* tack on the newline */ str[len] = '\n'; str[len + 1] = '\0'; return sysfs_emit(buf, "%s", str); } module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode, &svc_pool_map, 0644); /* * Detect best pool mapping mode heuristically, * according to the machine's topology. */ static int svc_pool_map_choose_mode(void) { unsigned int node; if (nr_online_nodes > 1) { /* * Actually have multiple NUMA nodes, * so split pools on NUMA node boundaries */ return SVC_POOL_PERNODE; } node = first_online_node; if (nr_cpus_node(node) > 2) { /* * Non-trivial SMP, or CONFIG_NUMA on * non-NUMA hardware, e.g. with a generic * x86_64 kernel on Xeons. In this case we * want to divide the pools on cpu boundaries. */ return SVC_POOL_PERCPU; } /* default: one global pool */ return SVC_POOL_GLOBAL; } /* * Allocate the to_pool[] and pool_to[] arrays. * Returns 0 on success or an errno. */ static int svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools) { m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL); if (!m->to_pool) goto fail; m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL); if (!m->pool_to) goto fail_free; return 0; fail_free: kfree(m->to_pool); m->to_pool = NULL; fail: return -ENOMEM; } /* * Initialise the pool map for SVC_POOL_PERCPU mode. * Returns number of pools or <0 on error. */ static int svc_pool_map_init_percpu(struct svc_pool_map *m) { unsigned int maxpools = nr_cpu_ids; unsigned int pidx = 0; unsigned int cpu; int err; err = svc_pool_map_alloc_arrays(m, maxpools); if (err) return err; for_each_online_cpu(cpu) { BUG_ON(pidx >= maxpools); m->to_pool[cpu] = pidx; m->pool_to[pidx] = cpu; pidx++; } /* cpus brought online later all get mapped to pool0, sorry */ return pidx; }; /* * Initialise the pool map for SVC_POOL_PERNODE mode. * Returns number of pools or <0 on error. */ static int svc_pool_map_init_pernode(struct svc_pool_map *m) { unsigned int maxpools = nr_node_ids; unsigned int pidx = 0; unsigned int node; int err; err = svc_pool_map_alloc_arrays(m, maxpools); if (err) return err; for_each_node_with_cpus(node) { /* some architectures (e.g. SN2) have cpuless nodes */ BUG_ON(pidx > maxpools); m->to_pool[node] = pidx; m->pool_to[pidx] = node; pidx++; } /* nodes brought online later all get mapped to pool0, sorry */ return pidx; } /* * Add a reference to the global map of cpus to pools (and * vice versa) if pools are in use. * Initialise the map if we're the first user. * Returns the number of pools. If this is '1', no reference * was taken. 
*/ static unsigned int svc_pool_map_get(void) { struct svc_pool_map *m = &svc_pool_map; int npools = -1; mutex_lock(&svc_pool_map_mutex); if (m->count++) { mutex_unlock(&svc_pool_map_mutex); return m->npools; } if (m->mode == SVC_POOL_AUTO) m->mode = svc_pool_map_choose_mode(); switch (m->mode) { case SVC_POOL_PERCPU: npools = svc_pool_map_init_percpu(m); break; case SVC_POOL_PERNODE: npools = svc_pool_map_init_pernode(m); break; } if (npools <= 0) { /* default, or memory allocation failure */ npools = 1; m->mode = SVC_POOL_GLOBAL; } m->npools = npools; mutex_unlock(&svc_pool_map_mutex); return npools; } /* * Drop a reference to the global map of cpus to pools. * When the last reference is dropped, the map data is * freed; this allows the sysadmin to change the pool. */ static void svc_pool_map_put(void) { struct svc_pool_map *m = &svc_pool_map; mutex_lock(&svc_pool_map_mutex); if (!--m->count) { kfree(m->to_pool); m->to_pool = NULL; kfree(m->pool_to); m->pool_to = NULL; m->npools = 0; } mutex_unlock(&svc_pool_map_mutex); } static int svc_pool_map_get_node(unsigned int pidx) { const struct svc_pool_map *m = &svc_pool_map; if (m->count) { if (m->mode == SVC_POOL_PERCPU) return cpu_to_node(m->pool_to[pidx]); if (m->mode == SVC_POOL_PERNODE) return m->pool_to[pidx]; } return numa_mem_id(); } /* * Set the given thread's cpus_allowed mask so that it * will only run on cpus in the given pool. */ static inline void svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx) { struct svc_pool_map *m = &svc_pool_map; unsigned int node = m->pool_to[pidx]; /* * The caller checks for sv_nrpools > 1, which * implies that we've been initialized. */ WARN_ON_ONCE(m->count == 0); if (m->count == 0) return; switch (m->mode) { case SVC_POOL_PERCPU: { set_cpus_allowed_ptr(task, cpumask_of(node)); break; } case SVC_POOL_PERNODE: { set_cpus_allowed_ptr(task, cpumask_of_node(node)); break; } } } /** * svc_pool_for_cpu - Select pool to run a thread on this cpu * @serv: An RPC service * * Use the active CPU and the svc_pool_map's mode setting to * select the svc thread pool to use. Once initialized, the * svc_pool_map does not change. 
* * Return value: * A pointer to an svc_pool */ struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv) { struct svc_pool_map *m = &svc_pool_map; int cpu = raw_smp_processor_id(); unsigned int pidx = 0; if (serv->sv_nrpools <= 1) return serv->sv_pools; switch (m->mode) { case SVC_POOL_PERCPU: pidx = m->to_pool[cpu]; break; case SVC_POOL_PERNODE: pidx = m->to_pool[cpu_to_node(cpu)]; break; } return &serv->sv_pools[pidx % serv->sv_nrpools]; } static int svc_rpcb_setup(struct svc_serv *serv, struct net *net) { int err; err = rpcb_create_local(net); if (err) return err; /* Remove any stale portmap registrations */ svc_unregister(serv, net); return 0; } void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net) { svc_unregister(serv, net); rpcb_put_local(net); } static int svc_uses_rpcbind(struct svc_serv *serv) { unsigned int p, i; for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { if (progp->pg_vers[i] == NULL) continue; if (!progp->pg_vers[i]->vs_hidden) return 1; } } return 0; } int svc_bind(struct svc_serv *serv, struct net *net) { if (!svc_uses_rpcbind(serv)) return 0; return svc_rpcb_setup(serv, net); } EXPORT_SYMBOL_GPL(svc_bind); #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void __svc_init_bc(struct svc_serv *serv) { lwq_init(&serv->sv_cb_list); } #else static void __svc_init_bc(struct svc_serv *serv) { } #endif /* * Create an RPC service */ static struct svc_serv * __svc_create(struct svc_program *prog, int nprogs, struct svc_stat *stats, unsigned int bufsize, int npools, int (*threadfn)(void *data)) { struct svc_serv *serv; unsigned int vers; unsigned int xdrsize; unsigned int i; if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) return NULL; serv->sv_name = prog->pg_name; serv->sv_programs = prog; serv->sv_nprogs = nprogs; serv->sv_stats = stats; if (bufsize > RPCSVC_MAXPAYLOAD) bufsize = RPCSVC_MAXPAYLOAD; serv->sv_max_payload = bufsize? bufsize : 4096; serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE); serv->sv_threadfn = threadfn; xdrsize = 0; for (i = 0; i < nprogs; i++) { struct svc_program *progp = &prog[i]; progp->pg_lovers = progp->pg_nvers-1; for (vers = 0; vers < progp->pg_nvers ; vers++) if (progp->pg_vers[vers]) { progp->pg_hivers = vers; if (progp->pg_lovers > vers) progp->pg_lovers = vers; if (progp->pg_vers[vers]->vs_xdrsize > xdrsize) xdrsize = progp->pg_vers[vers]->vs_xdrsize; } } serv->sv_xdrsize = xdrsize; INIT_LIST_HEAD(&serv->sv_tempsocks); INIT_LIST_HEAD(&serv->sv_permsocks); timer_setup(&serv->sv_temptimer, NULL, 0); spin_lock_init(&serv->sv_lock); __svc_init_bc(serv); serv->sv_nrpools = npools; serv->sv_pools = kcalloc(serv->sv_nrpools, sizeof(struct svc_pool), GFP_KERNEL); if (!serv->sv_pools) { kfree(serv); return NULL; } for (i = 0; i < serv->sv_nrpools; i++) { struct svc_pool *pool = &serv->sv_pools[i]; dprintk("svc: initialising pool %u for %s\n", i, serv->sv_name); pool->sp_id = i; lwq_init(&pool->sp_xprts); INIT_LIST_HEAD(&pool->sp_all_threads); init_llist_head(&pool->sp_idle_threads); percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL); } return serv; } /** * svc_create - Create an RPC service * @prog: the RPC program the new service will handle * @bufsize: maximum message size for @prog * @threadfn: a function to service RPC requests for @prog * * Returns an instantiated struct svc_serv object or NULL. 
*/ struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize, int (*threadfn)(void *data)) { return __svc_create(prog, 1, NULL, bufsize, 1, threadfn); } EXPORT_SYMBOL_GPL(svc_create); /** * svc_create_pooled - Create an RPC service with pooled threads * @prog: Array of RPC programs the new service will handle * @nprogs: Number of programs in the array * @stats: the stats struct if desired * @bufsize: maximum message size for @prog * @threadfn: a function to service RPC requests for @prog * * Returns an instantiated struct svc_serv object or NULL. */ struct svc_serv *svc_create_pooled(struct svc_program *prog, unsigned int nprogs, struct svc_stat *stats, unsigned int bufsize, int (*threadfn)(void *data)) { struct svc_serv *serv; unsigned int npools = svc_pool_map_get(); serv = __svc_create(prog, nprogs, stats, bufsize, npools, threadfn); if (!serv) goto out_err; serv->sv_is_pooled = true; return serv; out_err: svc_pool_map_put(); return NULL; } EXPORT_SYMBOL_GPL(svc_create_pooled); /* * Destroy an RPC service. Should be called with appropriate locking to * protect sv_permsocks and sv_tempsocks. */ void svc_destroy(struct svc_serv **servp) { struct svc_serv *serv = *servp; unsigned int i; *servp = NULL; dprintk("svc: svc_destroy(%s)\n", serv->sv_programs->pg_name); timer_shutdown_sync(&serv->sv_temptimer); /* * Remaining transports at this point are not expected. */ WARN_ONCE(!list_empty(&serv->sv_permsocks), "SVC: permsocks remain for %s\n", serv->sv_programs->pg_name); WARN_ONCE(!list_empty(&serv->sv_tempsocks), "SVC: tempsocks remain for %s\n", serv->sv_programs->pg_name); cache_clean_deferred(serv); if (serv->sv_is_pooled) svc_pool_map_put(); for (i = 0; i < serv->sv_nrpools; i++) { struct svc_pool *pool = &serv->sv_pools[i]; percpu_counter_destroy(&pool->sp_messages_arrived); percpu_counter_destroy(&pool->sp_sockets_queued); percpu_counter_destroy(&pool->sp_threads_woken); } kfree(serv->sv_pools); kfree(serv); } EXPORT_SYMBOL_GPL(svc_destroy); static bool svc_init_buffer(struct svc_rqst *rqstp, const struct svc_serv *serv, int node) { rqstp->rq_maxpages = svc_serv_maxpages(serv); /* rq_pages' last entry is NULL for historical reasons. 
*/ rqstp->rq_pages = kcalloc_node(rqstp->rq_maxpages + 1, sizeof(struct page *), GFP_KERNEL, node); if (!rqstp->rq_pages) return false; return true; } /* * Release an RPC server buffer */ static void svc_release_buffer(struct svc_rqst *rqstp) { unsigned long i; for (i = 0; i < rqstp->rq_maxpages; i++) if (rqstp->rq_pages[i]) put_page(rqstp->rq_pages[i]); kfree(rqstp->rq_pages); } static void svc_rqst_free(struct svc_rqst *rqstp) { folio_batch_release(&rqstp->rq_fbatch); kfree(rqstp->rq_bvec); svc_release_buffer(rqstp); if (rqstp->rq_scratch_folio) folio_put(rqstp->rq_scratch_folio); kfree(rqstp->rq_resp); kfree(rqstp->rq_argp); kfree(rqstp->rq_auth_data); kfree_rcu(rqstp, rq_rcu_head); } static struct svc_rqst * svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) { struct svc_rqst *rqstp; rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node); if (!rqstp) return rqstp; folio_batch_init(&rqstp->rq_fbatch); rqstp->rq_server = serv; rqstp->rq_pool = pool; rqstp->rq_scratch_folio = __folio_alloc_node(GFP_KERNEL, 0, node); if (!rqstp->rq_scratch_folio) goto out_enomem; rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); if (!rqstp->rq_argp) goto out_enomem; rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); if (!rqstp->rq_resp) goto out_enomem; if (!svc_init_buffer(rqstp, serv, node)) goto out_enomem; rqstp->rq_bvec = kcalloc_node(rqstp->rq_maxpages, sizeof(struct bio_vec), GFP_KERNEL, node); if (!rqstp->rq_bvec) goto out_enomem; rqstp->rq_err = -EAGAIN; /* No error yet */ serv->sv_nrthreads += 1; pool->sp_nrthreads += 1; /* Protected by whatever lock the service uses when calling * svc_set_num_threads() */ list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads); return rqstp; out_enomem: svc_rqst_free(rqstp); return NULL; } /** * svc_pool_wake_idle_thread - Awaken an idle thread in @pool * @pool: service thread pool * * Can be called from soft IRQ or process context. Finding an idle * service thread and marking it BUSY is atomic with respect to * other calls to svc_pool_wake_idle_thread(). * */ void svc_pool_wake_idle_thread(struct svc_pool *pool) { struct svc_rqst *rqstp; struct llist_node *ln; rcu_read_lock(); ln = READ_ONCE(pool->sp_idle_threads.first); if (ln) { rqstp = llist_entry(ln, struct svc_rqst, rq_idle); WRITE_ONCE(rqstp->rq_qtime, ktime_get()); if (!task_is_running(rqstp->rq_task)) { wake_up_process(rqstp->rq_task); trace_svc_pool_thread_wake(pool, rqstp->rq_task->pid); percpu_counter_inc(&pool->sp_threads_woken); } else { trace_svc_pool_thread_running(pool, rqstp->rq_task->pid); } rcu_read_unlock(); return; } rcu_read_unlock(); trace_svc_pool_thread_noidle(pool, 0); } EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread); static struct svc_pool * svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) { return pool ? 
pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools]; } static struct svc_pool * svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool, unsigned int *state) { struct svc_pool *pool; unsigned int i; pool = target_pool; if (!pool) { for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; if (pool->sp_nrthreads) break; } } if (pool && pool->sp_nrthreads) { set_bit(SP_VICTIM_REMAINS, &pool->sp_flags); set_bit(SP_NEED_VICTIM, &pool->sp_flags); return pool; } return NULL; } static int svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { struct svc_rqst *rqstp; struct task_struct *task; struct svc_pool *chosen_pool; unsigned int state = serv->sv_nrthreads-1; int node; int err; do { nrservs--; chosen_pool = svc_pool_next(serv, pool, &state); node = svc_pool_map_get_node(chosen_pool->sp_id); rqstp = svc_prepare_thread(serv, chosen_pool, node); if (!rqstp) return -ENOMEM; task = kthread_create_on_node(serv->sv_threadfn, rqstp, node, "%s", serv->sv_name); if (IS_ERR(task)) { svc_exit_thread(rqstp); return PTR_ERR(task); } rqstp->rq_task = task; if (serv->sv_nrpools > 1) svc_pool_map_set_cpumask(task, chosen_pool->sp_id); svc_sock_update_bufs(serv); wake_up_process(task); wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN); err = rqstp->rq_err; if (err) { svc_exit_thread(rqstp); return err; } } while (nrservs > 0); return 0; } static int svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { unsigned int state = serv->sv_nrthreads-1; struct svc_pool *victim; do { victim = svc_pool_victim(serv, pool, &state); if (!victim) break; svc_pool_wake_idle_thread(victim); wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS, TASK_IDLE); nrservs++; } while (nrservs < 0); return 0; } /** * svc_set_num_threads - adjust number of threads per RPC service * @serv: RPC service to adjust * @pool: Specific pool from which to choose threads, or NULL * @nrservs: New number of threads for @serv (0 or less means kill all threads) * * Create or destroy threads to make the number of threads for @serv the * given number. If @pool is non-NULL, change only threads in that pool; * otherwise, round-robin between all pools for @serv. @serv's * sv_nrthreads is adjusted for each thread created or destroyed. * * Caller must ensure mutual exclusion between this and server startup or * shutdown. * * Returns zero on success or a negative errno if an error occurred while * starting a thread. */ int svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { if (!pool) nrservs -= serv->sv_nrthreads; else nrservs -= pool->sp_nrthreads; if (nrservs > 0) return svc_start_kthreads(serv, pool, nrservs); if (nrservs < 0) return svc_stop_kthreads(serv, pool, nrservs); return 0; } EXPORT_SYMBOL_GPL(svc_set_num_threads); /** * svc_rqst_replace_page - Replace one page in rq_pages[] * @rqstp: svc_rqst with pages to replace * @page: replacement page * * When replacing a page in rq_pages, batch the release of the * replaced pages to avoid hammering the page allocator. 
* * Return values: * %true: page replaced * %false: array bounds checking failed */ bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page) { struct page **begin = rqstp->rq_pages; struct page **end = &rqstp->rq_pages[rqstp->rq_maxpages]; if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) { trace_svc_replace_page_err(rqstp); return false; } if (*rqstp->rq_next_page) { if (!folio_batch_add(&rqstp->rq_fbatch, page_folio(*rqstp->rq_next_page))) __folio_batch_release(&rqstp->rq_fbatch); } get_page(page); *(rqstp->rq_next_page++) = page; return true; } EXPORT_SYMBOL_GPL(svc_rqst_replace_page); /** * svc_rqst_release_pages - Release Reply buffer pages * @rqstp: RPC transaction context * * Release response pages that might still be in flight after * svc_send, and any spliced filesystem-owned pages. */ void svc_rqst_release_pages(struct svc_rqst *rqstp) { int i, count = rqstp->rq_next_page - rqstp->rq_respages; if (count) { release_pages(rqstp->rq_respages, count); for (i = 0; i < count; i++) rqstp->rq_respages[i] = NULL; } } /** * svc_exit_thread - finalise the termination of a sunrpc server thread * @rqstp: the svc_rqst which represents the thread. * * When a thread started with svc_new_thread() exits it must call * svc_exit_thread() as its last act. This must be done with the * service mutex held. Normally this is held by a DIFFERENT thread, the * one that is calling svc_set_num_threads() and which will wait for * SP_VICTIM_REMAINS to be cleared before dropping the mutex. If the * thread exits for any reason other than svc_thread_should_stop() * returning %true (which indicated that svc_set_num_threads() is * waiting for it to exit), then it must take the service mutex itself, * which can only safely be done using mutex_try_lock(). */ void svc_exit_thread(struct svc_rqst *rqstp) { struct svc_serv *serv = rqstp->rq_server; struct svc_pool *pool = rqstp->rq_pool; list_del_rcu(&rqstp->rq_all); pool->sp_nrthreads -= 1; serv->sv_nrthreads -= 1; svc_sock_update_bufs(serv); svc_rqst_free(rqstp); clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags); } EXPORT_SYMBOL_GPL(svc_exit_thread); /* * Register an "inet" protocol family netid with the local * rpcbind daemon via an rpcbind v4 SET request. * * No netconfig infrastructure is available in the kernel, so * we map IP_ protocol numbers to netids by hand. * * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_rpcb_register4(struct net *net, const u32 program, const u32 version, const unsigned short protocol, const unsigned short port) { const struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = htons(port), }; const char *netid; int error; switch (protocol) { case IPPROTO_UDP: netid = RPCBIND_NETID_UDP; break; case IPPROTO_TCP: netid = RPCBIND_NETID_TCP; break; default: return -ENOPROTOOPT; } error = rpcb_v4_register(net, program, version, (const struct sockaddr *)&sin, netid); /* * User space didn't support rpcbind v4, so retry this * registration request with the legacy rpcbind v2 protocol. */ if (error == -EPROTONOSUPPORT) error = rpcb_register(net, program, version, protocol, port); return error; } #if IS_ENABLED(CONFIG_IPV6) /* * Register an "inet6" protocol family netid with the local * rpcbind daemon via an rpcbind v4 SET request. * * No netconfig infrastructure is available in the kernel, so * we map IP_ protocol numbers to netids by hand. 
* * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_rpcb_register6(struct net *net, const u32 program, const u32 version, const unsigned short protocol, const unsigned short port) { const struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, .sin6_port = htons(port), }; const char *netid; int error; switch (protocol) { case IPPROTO_UDP: netid = RPCBIND_NETID_UDP6; break; case IPPROTO_TCP: netid = RPCBIND_NETID_TCP6; break; default: return -ENOPROTOOPT; } error = rpcb_v4_register(net, program, version, (const struct sockaddr *)&sin6, netid); /* * User space didn't support rpcbind version 4, so we won't * use a PF_INET6 listener. */ if (error == -EPROTONOSUPPORT) error = -EAFNOSUPPORT; return error; } #endif /* IS_ENABLED(CONFIG_IPV6) */ /* * Register a kernel RPC service via rpcbind version 4. * * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_register(struct net *net, const char *progname, const u32 program, const u32 version, const int family, const unsigned short protocol, const unsigned short port) { int error = -EAFNOSUPPORT; switch (family) { case PF_INET: error = __svc_rpcb_register4(net, program, version, protocol, port); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: error = __svc_rpcb_register6(net, program, version, protocol, port); #endif } trace_svc_register(progname, version, family, protocol, port, error); return error; } static int svc_rpcbind_set_version(struct net *net, const struct svc_program *progp, u32 version, int family, unsigned short proto, unsigned short port) { return __svc_register(net, progp->pg_name, progp->pg_prog, version, family, proto, port); } int svc_generic_rpcbind_set(struct net *net, const struct svc_program *progp, u32 version, int family, unsigned short proto, unsigned short port) { const struct svc_version *vers = progp->pg_vers[version]; int error; if (vers == NULL) return 0; if (vers->vs_hidden) { trace_svc_noregister(progp->pg_name, version, proto, port, family, 0); return 0; } /* * Don't register a UDP port if we need congestion * control. */ if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP) return 0; error = svc_rpcbind_set_version(net, progp, version, family, proto, port); return (vers->vs_rpcb_optnl) ? 0 : error; } EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set); /** * svc_register - register an RPC service with the local portmapper * @serv: svc_serv struct for the service to register * @net: net namespace for the service to register * @family: protocol family of service's listener socket * @proto: transport protocol number to advertise * @port: port to advertise * * Service is registered for any address in the passed-in protocol family */ int svc_register(const struct svc_serv *serv, struct net *net, const int family, const unsigned short proto, const unsigned short port) { unsigned int p, i; int error = 0; WARN_ON_ONCE(proto == 0 && port == 0); if (proto == 0 && port == 0) return -EINVAL; for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { error = progp->pg_rpcbind_set(net, progp, i, family, proto, port); if (error < 0) { printk(KERN_WARNING "svc: failed to register " "%sv%u RPC service (errno %d).\n", progp->pg_name, i, -error); break; } } } return error; } /* * If user space is running rpcbind, it should take the v4 UNSET * and clear everything for this [program, version]. 
If user space * is running portmap, it will reject the v4 UNSET, but won't have * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient * in this case to clear all existing entries for [program, version]. */ static void __svc_unregister(struct net *net, const u32 program, const u32 version, const char *progname) { int error; error = rpcb_v4_register(net, program, version, NULL, ""); /* * User space didn't support rpcbind v4, so retry this * request with the legacy rpcbind v2 protocol. */ if (error == -EPROTONOSUPPORT) error = rpcb_register(net, program, version, 0, 0); trace_svc_unregister(progname, version, error); } /* * All netids, bind addresses and ports registered for [program, version] * are removed from the local rpcbind database (if the service is not * hidden) to make way for a new instance of the service. * * The result of unregistration is reported via dprintk for those who want * verification of the result, but is otherwise not important. */ static void svc_unregister(const struct svc_serv *serv, struct net *net) { struct sighand_struct *sighand; unsigned long flags; unsigned int p, i; clear_thread_flag(TIF_SIGPENDING); for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { if (progp->pg_vers[i] == NULL) continue; if (progp->pg_vers[i]->vs_hidden) continue; __svc_unregister(net, progp->pg_prog, i, progp->pg_name); } } rcu_read_lock(); sighand = rcu_dereference(current->sighand); spin_lock_irqsave(&sighand->siglock, flags); recalc_sigpending(); spin_unlock_irqrestore(&sighand->siglock, flags); rcu_read_unlock(); } /* * dprintk the given error with the address of the client that caused it. */ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) { struct va_format vaf; va_list args; char buf[RPC_MAX_ADDRBUFLEN]; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf); va_end(args); } #else static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {} #endif __be32 svc_generic_init_request(struct svc_rqst *rqstp, const struct svc_program *progp, struct svc_process_info *ret) { const struct svc_version *versp = NULL; /* compiler food */ const struct svc_procedure *procp = NULL; if (rqstp->rq_vers >= progp->pg_nvers ) goto err_bad_vers; versp = progp->pg_vers[rqstp->rq_vers]; if (!versp) goto err_bad_vers; /* * Some protocol versions (namely NFSv4) require some form of * congestion control. (See RFC 7530 section 3.1 paragraph 2) * In other words, UDP is not allowed. We mark those when setting * up the svc_xprt, and verify that here. * * The spec is not very clear about what error should be returned * when someone tries to access a server that is listening on UDP * for lower versions. RPC_PROG_MISMATCH seems to be the closest * fit. 
*/ if (versp->vs_need_cong_ctrl && rqstp->rq_xprt && !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags)) goto err_bad_vers; if (rqstp->rq_proc >= versp->vs_nproc) goto err_bad_proc; rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc]; /* Initialize storage for argp and resp */ memset(rqstp->rq_argp, 0, procp->pc_argzero); memset(rqstp->rq_resp, 0, procp->pc_ressize); /* Bump per-procedure stats counter */ this_cpu_inc(versp->vs_count[rqstp->rq_proc]); ret->dispatch = versp->vs_dispatch; return rpc_success; err_bad_vers: ret->mismatch.lovers = progp->pg_lovers; ret->mismatch.hivers = progp->pg_hivers; return rpc_prog_mismatch; err_bad_proc: return rpc_proc_unavail; } EXPORT_SYMBOL_GPL(svc_generic_init_request); /* * Common routine for processing the RPC request. */ static int svc_process_common(struct svc_rqst *rqstp) { struct xdr_stream *xdr = &rqstp->rq_res_stream; struct svc_program *progp = NULL; const struct svc_procedure *procp = NULL; struct svc_serv *serv = rqstp->rq_server; struct svc_process_info process; enum svc_auth_status auth_res; unsigned int aoffset; int pr, rc; __be32 *p; /* Reset the accept_stat for the RPC */ rqstp->rq_accept_statp = NULL; /* Will be turned off only when NFSv4 Sessions are used */ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags); clear_bit(RQ_DROPME, &rqstp->rq_flags); /* Construct the first words of the reply: */ svcxdr_init_encode(rqstp); xdr_stream_encode_be32(xdr, rqstp->rq_xid); xdr_stream_encode_be32(xdr, rpc_reply); p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4); if (unlikely(!p)) goto err_short_len; if (*p++ != cpu_to_be32(RPC_VERSION)) goto err_bad_rpc; xdr_stream_encode_be32(xdr, rpc_msg_accepted); rqstp->rq_prog = be32_to_cpup(p++); rqstp->rq_vers = be32_to_cpup(p++); rqstp->rq_proc = be32_to_cpup(p); for (pr = 0; pr < serv->sv_nprogs; pr++) if (rqstp->rq_prog == serv->sv_programs[pr].pg_prog) progp = &serv->sv_programs[pr]; /* * Decode auth data, and add verifier to reply buffer. * We do this before anything else in order to get a decent * auth verifier. */ auth_res = svc_authenticate(rqstp); /* Also give the program a chance to reject this call: */ if (auth_res == SVC_OK && progp) auth_res = progp->pg_authenticate(rqstp); trace_svc_authenticate(rqstp, auth_res); switch (auth_res) { case SVC_OK: break; case SVC_GARBAGE: rqstp->rq_auth_stat = rpc_autherr_badcred; goto err_bad_auth; case SVC_DENIED: goto err_bad_auth; case SVC_CLOSE: goto close; case SVC_DROP: goto dropit; case SVC_COMPLETE: goto sendit; default: pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res); rqstp->rq_auth_stat = rpc_autherr_failed; goto err_bad_auth; } if (progp == NULL) goto err_bad_prog; switch (progp->pg_init_request(rqstp, progp, &process)) { case rpc_success: break; case rpc_prog_unavail: goto err_bad_prog; case rpc_prog_mismatch: goto err_bad_vers; case rpc_proc_unavail: goto err_bad_proc; } procp = rqstp->rq_procinfo; /* Should this check go into the dispatcher? */ if (!procp || !procp->pc_func) goto err_bad_proc; /* Syntactic check complete */ if (serv->sv_stats) serv->sv_stats->rpccnt++; trace_svc_process(rqstp, progp->pg_name); aoffset = xdr_stream_pos(xdr); /* un-reserve some of the out-queue now that we have a * better idea of reply size */ if (procp->pc_xdrressize) svc_reserve_auth(rqstp, procp->pc_xdrressize<<2); /* Call the function that processes the request. 
*/ rc = process.dispatch(rqstp); xdr_finish_decode(xdr); if (!rc) goto dropit; if (rqstp->rq_auth_stat != rpc_auth_ok) goto err_bad_auth; if (*rqstp->rq_accept_statp != rpc_success) xdr_truncate_encode(xdr, aoffset); if (procp->pc_encode == NULL) goto dropit; sendit: if (svc_authorise(rqstp)) goto close_xprt; return 1; /* Caller can now send it */ dropit: svc_authorise(rqstp); /* doesn't hurt to call this twice */ dprintk("svc: svc_process dropit\n"); return 0; close: svc_authorise(rqstp); close_xprt: if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) svc_xprt_close(rqstp->rq_xprt); dprintk("svc: svc_process close\n"); return 0; err_short_len: svc_printk(rqstp, "short len %u, dropping request\n", rqstp->rq_arg.len); goto close_xprt; err_bad_rpc: if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); xdr_stream_encode_u32(xdr, RPC_MISMATCH); /* Only RPCv2 supported */ xdr_stream_encode_u32(xdr, RPC_VERSION); xdr_stream_encode_u32(xdr, RPC_VERSION); return 1; /* don't wrap */ err_bad_auth: dprintk("svc: authentication failed (%d)\n", be32_to_cpu(rqstp->rq_auth_stat)); if (serv->sv_stats) serv->sv_stats->rpcbadauth++; /* Restore write pointer to location of reply status: */ xdr_truncate_encode(xdr, XDR_UNIT * 2); xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR); xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat); goto sendit; err_bad_prog: dprintk("svc: unknown program %d\n", rqstp->rq_prog); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_prog_unavail; goto sendit; err_bad_vers: svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n", rqstp->rq_vers, rqstp->rq_prog, progp->pg_name); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_prog_mismatch; /* * svc_authenticate() has already added the verifier and * advanced the stream just past rq_accept_statp. */ xdr_stream_encode_u32(xdr, process.mismatch.lovers); xdr_stream_encode_u32(xdr, process.mismatch.hivers); goto sendit; err_bad_proc: svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_proc_unavail; goto sendit; } /* * Drop request */ static void svc_drop(struct svc_rqst *rqstp) { trace_svc_drop(rqstp); } static void svc_release_rqst(struct svc_rqst *rqstp) { const struct svc_procedure *procp = rqstp->rq_procinfo; if (procp && procp->pc_release) procp->pc_release(rqstp); } /** * svc_process - Execute one RPC transaction * @rqstp: RPC transaction context * */ void svc_process(struct svc_rqst *rqstp) { struct kvec *resv = &rqstp->rq_res.head[0]; __be32 *p; #if IS_ENABLED(CONFIG_FAIL_SUNRPC) if (!fail_sunrpc.ignore_server_disconnect && should_fail(&fail_sunrpc.attr, 1)) svc_xprt_deferred_close(rqstp->rq_xprt); #endif /* * Setup response xdr_buf. 
* Initially it has just one page */ rqstp->rq_next_page = &rqstp->rq_respages[1]; resv->iov_base = page_address(rqstp->rq_respages[0]); resv->iov_len = 0; rqstp->rq_res.pages = rqstp->rq_next_page; rqstp->rq_res.len = 0; rqstp->rq_res.page_base = 0; rqstp->rq_res.page_len = 0; rqstp->rq_res.buflen = PAGE_SIZE; rqstp->rq_res.tail[0].iov_base = NULL; rqstp->rq_res.tail[0].iov_len = 0; svcxdr_init_decode(rqstp); p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2); if (unlikely(!p)) goto out_drop; rqstp->rq_xid = *p++; if (unlikely(*p != rpc_call)) goto out_baddir; if (!svc_process_common(rqstp)) { svc_release_rqst(rqstp); goto out_drop; } svc_send(rqstp); svc_release_rqst(rqstp); return; out_baddir: svc_printk(rqstp, "bad direction 0x%08x, dropping request\n", be32_to_cpu(*p)); if (rqstp->rq_server->sv_stats) rqstp->rq_server->sv_stats->rpcbadfmt++; out_drop: svc_drop(rqstp); } #if defined(CONFIG_SUNRPC_BACKCHANNEL) /** * svc_process_bc - process a reverse-direction RPC request * @req: RPC request to be used for client-side processing * @rqstp: server-side execution context * */ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp) { struct rpc_timeout timeout = { .to_increment = 0, }; struct rpc_task *task; int proc_error; /* Build the svc_rqst used by the common processing routine */ rqstp->rq_xid = req->rq_xid; rqstp->rq_prot = req->rq_xprt->prot; rqstp->rq_bc_net = req->rq_xprt->xprt_net; rqstp->rq_addrlen = sizeof(req->rq_xprt->addr); memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); /* Adjust the argument buffer length */ rqstp->rq_arg.len = req->rq_private_buf.len; if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; rqstp->rq_arg.page_len = 0; } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len) rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; else rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len; /* Reset the response buffer */ rqstp->rq_res.head[0].iov_len = 0; /* * Skip the XID and calldir fields because they've already * been processed by the caller. */ svcxdr_init_decode(rqstp); if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) return; /* Parse and execute the bc call */ proc_error = svc_process_common(rqstp); atomic_dec(&req->rq_xprt->bc_slot_count); if (!proc_error) { /* Processing error: drop the request */ xprt_free_bc_request(req); svc_release_rqst(rqstp); return; } /* Finally, send the reply synchronously */ if (rqstp->bc_to_initval > 0) { timeout.to_initval = rqstp->bc_to_initval; timeout.to_retries = rqstp->bc_to_retries; } else { timeout.to_initval = req->rq_xprt->timeout->to_initval; timeout.to_retries = req->rq_xprt->timeout->to_retries; } timeout.to_maxval = timeout.to_initval; memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); task = rpc_run_bc_task(req, &timeout); svc_release_rqst(rqstp); if (IS_ERR(task)) return; WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); rpc_put_task(task); } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ /** * svc_max_payload - Return transport-specific limit on the RPC payload * @rqstp: RPC transaction context * * Returns the maximum number of payload bytes the current transport * allows. 
*/ u32 svc_max_payload(const struct svc_rqst *rqstp) { u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload; if (rqstp->rq_server->sv_max_payload < max) max = rqstp->rq_server->sv_max_payload; return max; } EXPORT_SYMBOL_GPL(svc_max_payload); /** * svc_proc_name - Return RPC procedure name in string form * @rqstp: svc_rqst to operate on * * Return value: * Pointer to a NUL-terminated string */ const char *svc_proc_name(const struct svc_rqst *rqstp) { if (rqstp && rqstp->rq_procinfo) return rqstp->rq_procinfo->pc_name; return "unknown"; } /** * svc_encode_result_payload - mark a range of bytes as a result payload * @rqstp: svc_rqst to operate on * @offset: payload's byte offset in rqstp->rq_res * @length: size of payload, in bytes * * Returns zero on success, or a negative errno if a permanent * error occurred. */ int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length) { return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset, length); } EXPORT_SYMBOL_GPL(svc_encode_result_payload); /** * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call * @rqstp: svc_rqst to operate on * @first: buffer containing first section of pathname * @p: buffer containing remaining section of pathname * @total: total length of the pathname argument * * The VFS symlink API demands a NUL-terminated pathname in mapped memory. * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free * the returned string. */ char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total) { size_t len, remaining; char *result, *dst; result = kmalloc(total + 1, GFP_KERNEL); if (!result) return ERR_PTR(-ESERVERFAULT); dst = result; remaining = total; len = min_t(size_t, total, first->iov_len); if (len) { memcpy(dst, first->iov_base, len); dst += len; remaining -= len; } if (remaining) { len = min_t(size_t, remaining, PAGE_SIZE); memcpy(dst, p, len); dst += len; } *dst = '\0'; /* Sanity check: Linux doesn't allow the pathname argument to * contain a NUL byte. */ if (strlen(result) != total) { kfree(result); return ERR_PTR(-EINVAL); } return result; } EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
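Taken together, svc_create_pooled(), svc_set_num_threads() and svc_exit_thread() above describe the lifecycle of a pooled RPC server. The following is a minimal sketch of a caller, assuming example_prog is a fully initialised struct svc_program; the example_* names are hypothetical (real in-tree users are nfsd, lockd and the NFSv4 callback service):

/* Sketch: the per-thread service loop handed to svc_create_pooled(). */
static int example_thread_fn(void *data)
{
	struct svc_rqst *rqstp = data;

	/* Serve requests until svc_set_num_threads() selects us as a victim. */
	while (!svc_thread_should_stop(rqstp))
		svc_recv(rqstp);
	svc_exit_thread(rqstp);
	return 0;
}

/* Sketch: create the service and spawn four workers. The caller must
 * provide the mutual exclusion that svc_set_num_threads() requires. */
static struct svc_serv *example_start_server(struct svc_program *example_prog)
{
	struct svc_serv *serv;

	/* bufsize 0 falls back to the 4096-byte default in __svc_create(). */
	serv = svc_create_pooled(example_prog, 1, NULL, 0, example_thread_fn);
	if (!serv)
		return NULL;
	if (svc_set_num_threads(serv, NULL, 4) < 0) {
		/* Tear down any threads that did start, then the service. */
		svc_set_num_threads(serv, NULL, 0);
		svc_destroy(&serv);
		return NULL;
	}
	return serv;
}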
// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include <linux/export.h> #include <linux/etherdevice.h> #include "htt.h" #include "mac.h" #include "hif.h" #include "txrx.h" #include "debug.h" static u8 ath10k_htt_tx_txq_calc_size(size_t count) { int exp; int factor; exp = 0; factor = count >> 7; while (factor >= 64 && exp < 4) { factor >>= 3; exp++; } if (exp == 4) return 0xff; if (count > 0) factor = max(1, factor); return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) | SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR); } static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct ath10k *ar = hw->priv; struct ath10k_sta *arsta; struct ath10k_vif *arvif = (void *)txq->vif->drv_priv; unsigned long byte_cnt; int idx; u32 bit; u16 peer_id; u8 tid; u8 count; lockdep_assert_held(&ar->htt.tx_lock); if (!ar->htt.tx_q_state.enabled) return; if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) return; if (txq->sta) { arsta = (void *)txq->sta->drv_priv; peer_id = arsta->peer_id; } else { peer_id = arvif->peer_id; } tid = txq->tid; bit = BIT(peer_id % 32); idx = peer_id / 32; ieee80211_txq_get_depth(txq, NULL, &byte_cnt); count = ath10k_htt_tx_txq_calc_size(byte_cnt); if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || unlikely(tid >= ar->htt.tx_q_state.num_tids)) { ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n", peer_id, tid); return; } ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count; ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit; ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ?
bit : 0; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n", peer_id, tid, count); } static void __ath10k_htt_tx_txq_sync(struct ath10k *ar) { u32 seq; size_t size; lockdep_assert_held(&ar->htt.tx_lock); if (!ar->htt.tx_q_state.enabled) return; if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) return; seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq); seq++; ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n", seq); size = sizeof(*ar->htt.tx_q_state.vaddr); dma_sync_single_for_device(ar->dev, ar->htt.tx_q_state.paddr, size, DMA_TO_DEVICE); } void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct ath10k *ar = hw->priv; spin_lock_bh(&ar->htt.tx_lock); __ath10k_htt_tx_txq_recalc(hw, txq); spin_unlock_bh(&ar->htt.tx_lock); } void ath10k_htt_tx_txq_sync(struct ath10k *ar) { spin_lock_bh(&ar->htt.tx_lock); __ath10k_htt_tx_txq_sync(ar); spin_unlock_bh(&ar->htt.tx_lock); } void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct ath10k *ar = hw->priv; spin_lock_bh(&ar->htt.tx_lock); __ath10k_htt_tx_txq_recalc(hw, txq); __ath10k_htt_tx_txq_sync(ar); spin_unlock_bh(&ar->htt.tx_lock); } void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) { lockdep_assert_held(&htt->tx_lock); htt->num_pending_tx--; if (htt->num_pending_tx == htt->max_num_pending_tx - 1) ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); if (htt->num_pending_tx == 0) wake_up(&htt->empty_tx_wq); } int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) { lockdep_assert_held(&htt->tx_lock); if (htt->num_pending_tx >= htt->max_num_pending_tx) return -EBUSY; htt->num_pending_tx++; if (htt->num_pending_tx == htt->max_num_pending_tx) ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); return 0; } int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt, bool is_presp) { struct ath10k *ar = htt->ar; lockdep_assert_held(&htt->tx_lock); if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres) return 0; if (is_presp && ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx) return -EBUSY; htt->num_pending_mgmt_tx++; return 0; } void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt) { lockdep_assert_held(&htt->tx_lock); if (!htt->ar->hw_params.max_probe_resp_desc_thres) return; htt->num_pending_mgmt_tx--; } int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) { struct ath10k *ar = htt->ar; int ret; spin_lock_bh(&htt->tx_lock); ret = idr_alloc(&htt->pending_tx, skb, 0, htt->max_num_pending_tx, GFP_ATOMIC); spin_unlock_bh(&htt->tx_lock); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); return ret; } void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) { struct ath10k *ar = htt->ar; lockdep_assert_held(&htt->tx_lock); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id); idr_remove(&htt->pending_tx, msdu_id); } static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; if (!htt->txbuf.vaddr_txbuff_32) return; size = htt->txbuf.size; dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32, htt->txbuf.paddr); htt->txbuf.vaddr_txbuff_32 = NULL; } static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf_32); htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size, 
&htt->txbuf.paddr, GFP_KERNEL); if (!htt->txbuf.vaddr_txbuff_32) return -ENOMEM; htt->txbuf.size = size; return 0; } static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; if (!htt->txbuf.vaddr_txbuff_64) return; size = htt->txbuf.size; dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64, htt->txbuf.paddr); htt->txbuf.vaddr_txbuff_64 = NULL; } static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf_64); htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr, GFP_KERNEL); if (!htt->txbuf.vaddr_txbuff_64) return -ENOMEM; htt->txbuf.size = size; return 0; } static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt) { size_t size; if (!htt->frag_desc.vaddr_desc_32) return; size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr_desc_32, htt->frag_desc.paddr); htt->frag_desc.vaddr_desc_32 = NULL; } static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; if (!ar->hw_params.continuous_frag_desc) return 0; size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size, &htt->frag_desc.paddr, GFP_KERNEL); if (!htt->frag_desc.vaddr_desc_32) { ath10k_err(ar, "failed to alloc fragment desc memory\n"); return -ENOMEM; } htt->frag_desc.size = size; return 0; } static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt) { size_t size; if (!htt->frag_desc.vaddr_desc_64) return; size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc_64); dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr_desc_64, htt->frag_desc.paddr); htt->frag_desc.vaddr_desc_64 = NULL; } static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; if (!ar->hw_params.continuous_frag_desc) return 0; size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc_64); htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size, &htt->frag_desc.paddr, GFP_KERNEL); if (!htt->frag_desc.vaddr_desc_64) { ath10k_err(ar, "failed to alloc fragment desc memory\n"); return -ENOMEM; } htt->frag_desc.size = size; return 0; } static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->running_fw->fw_file.fw_features)) return; size = sizeof(*htt->tx_q_state.vaddr); dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE); kfree(htt->tx_q_state.vaddr); } static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; size_t size; int ret; if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->running_fw->fw_file.fw_features)) return 0; htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS; htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS; htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES; size = sizeof(*htt->tx_q_state.vaddr); htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL); if (!htt->tx_q_state.vaddr) return -ENOMEM; htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr, size, DMA_TO_DEVICE); ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr); if (ret) { ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret); kfree(htt->tx_q_state.vaddr); return -EIO; } return 0; } static void 
ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt) { WARN_ON(!kfifo_is_empty(&htt->txdone_fifo)); kfifo_free(&htt->txdone_fifo); } static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt) { int ret; size_t size; size = roundup_pow_of_two(htt->max_num_pending_tx); ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL); return ret; } static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; int ret; ret = ath10k_htt_alloc_txbuff(htt); if (ret) { ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret); return ret; } ret = ath10k_htt_alloc_frag_desc(htt); if (ret) { ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret); goto free_txbuf; } ret = ath10k_htt_tx_alloc_txq(htt); if (ret) { ath10k_err(ar, "failed to alloc txq: %d\n", ret); goto free_frag_desc; } ret = ath10k_htt_tx_alloc_txdone_fifo(htt); if (ret) { ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret); goto free_txq; } return 0; free_txq: ath10k_htt_tx_free_txq(htt); free_frag_desc: ath10k_htt_free_frag_desc(htt); free_txbuf: ath10k_htt_free_txbuff(htt); return ret; } int ath10k_htt_tx_start(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; int ret; ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); spin_lock_init(&htt->tx_lock); idr_init(&htt->pending_tx); if (htt->tx_mem_allocated) return 0; if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) return 0; ret = ath10k_htt_tx_alloc_buf(htt); if (ret) goto free_idr_pending_tx; htt->tx_mem_allocated = true; return 0; free_idr_pending_tx: idr_destroy(&htt->pending_tx); return ret; } static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) { struct ath10k *ar = ctx; struct ath10k_htt *htt = &ar->htt; struct htt_tx_done tx_done = {}; ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id); tx_done.msdu_id = msdu_id; tx_done.status = HTT_TX_COMPL_STATE_DISCARD; ath10k_txrx_tx_unref(htt, &tx_done); return 0; } void ath10k_htt_tx_destroy(struct ath10k_htt *htt) { if (!htt->tx_mem_allocated) return; ath10k_htt_free_txbuff(htt); ath10k_htt_tx_free_txq(htt); ath10k_htt_free_frag_desc(htt); ath10k_htt_tx_free_txdone_fifo(htt); htt->tx_mem_allocated = false; } static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt) { ath10k_htc_stop_hl(htt->ar); idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); } void ath10k_htt_tx_stop(struct ath10k_htt *htt) { ath10k_htt_flush_tx_queue(htt); idr_destroy(&htt->pending_tx); } void ath10k_htt_tx_free(struct ath10k_htt *htt) { ath10k_htt_tx_stop(htt); ath10k_htt_tx_destroy(htt); } void ath10k_htt_op_ep_tx_credits(struct ath10k *ar) { queue_work(ar->workqueue, &ar->bundle_tx_work); } void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htt *htt = &ar->htt; struct htt_tx_done tx_done = {}; struct htt_cmd_hdr *htt_hdr; struct htt_data_tx_desc *desc_hdr = NULL; u16 flags1 = 0; u8 msg_type = 0; if (htt->disable_tx_comp) { htt_hdr = (struct htt_cmd_hdr *)skb->data; msg_type = htt_hdr->msg_type; if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) { desc_hdr = (struct htt_data_tx_desc *) (skb->data + sizeof(*htt_hdr)); flags1 = __le16_to_cpu(desc_hdr->flags1); skb_pull(skb, sizeof(struct htt_cmd_hdr)); skb_pull(skb, sizeof(struct htt_data_tx_desc)); } } dev_kfree_skb_any(skb); if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM)) return; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx complete msdu id:%u ,flags1:%x\n", __le16_to_cpu(desc_hdr->id), flags1); if (flags1 & 
HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE) return; tx_done.status = HTT_TX_COMPL_STATE_ACK; tx_done.msdu_id = __le16_to_cpu(desc_hdr->id); ath10k_txrx_tx_unref(&ar->htt, &tx_done); } void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb) { dev_kfree_skb_any(skb); } EXPORT_SYMBOL(ath10k_htt_hif_tx_complete); int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; int len = 0; int ret; len += sizeof(cmd->hdr); len += sizeof(cmd->ver_req); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask, u64 cookie) { struct ath10k *ar = htt->ar; struct htt_stats_req *req; struct sk_buff *skb; struct htt_cmd *cmd; int len = 0, ret; len += sizeof(cmd->hdr); len += sizeof(cmd->stats_req); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ; req = &cmd->stats_req; memset(req, 0, sizeof(*req)); /* currently we support only max 24 bit masks so no need to worry * about endian support */ memcpy(req->upload_types, &mask, 3); memcpy(req->reset_types, &reset_mask, 3); req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID; req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff); req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { ath10k_warn(ar, "failed to send htt type stats request: %d", ret); dev_kfree_skb_any(skb); return ret; } return 0; } static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; struct htt_frag_desc_bank_cfg32 *cfg; int ret, size; u8 info; if (!ar->hw_params.continuous_frag_desc) return 0; if (!htt->frag_desc.paddr) { ath10k_warn(ar, "invalid frag desc memory\n"); return -EINVAL; } size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32); skb = ath10k_htc_alloc_skb(ar, size); if (!skb) return -ENOMEM; skb_put(skb, size); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; info = 0; info |= SM(htt->tx_q_state.type, HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->running_fw->fw_file.fw_features)) info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; cfg = &cmd->frag_desc_bank_cfg32; cfg->info = info; cfg->num_banks = 1; cfg->desc_size = sizeof(struct htt_msdu_ext_desc); cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr); cfg->bank_id[0].bank_min_id = 0; cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - 1); cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr); cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers); cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids); cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE; cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n"); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", ret); dev_kfree_skb_any(skb); return ret; } return 0; } static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt 
*htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; struct htt_frag_desc_bank_cfg64 *cfg; int ret, size; u8 info; if (!ar->hw_params.continuous_frag_desc) return 0; if (!htt->frag_desc.paddr) { ath10k_warn(ar, "invalid frag desc memory\n"); return -EINVAL; } size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64); skb = ath10k_htc_alloc_skb(ar, size); if (!skb) return -ENOMEM; skb_put(skb, size); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; info = 0; info |= SM(htt->tx_q_state.type, HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->running_fw->fw_file.fw_features)) info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; cfg = &cmd->frag_desc_bank_cfg64; cfg->info = info; cfg->num_banks = 1; cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64); cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr); cfg->bank_id[0].bank_min_id = 0; cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - 1); cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr); cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers); cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids); cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE; cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n"); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", ret); dev_kfree_skb_any(skb); return ret; } return 0; } static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, struct htt_rx_ring_setup_ring32 *rx_ring) { ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets); } static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, struct htt_rx_ring_setup_ring64 *rx_ring) { ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets); } static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct ath10k_hw_params *hw = &ar->hw_params; struct sk_buff *skb; struct htt_cmd *cmd; struct htt_rx_ring_setup_ring32 *ring; const int num_rx_ring = 1; u16 flags; u32 fw_idx; int len; int ret; /* * the HW expects the buffer to be an integral number of 4-byte * "words" */ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr) + (sizeof(*ring) * num_rx_ring); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; ring = &cmd->rx_setup_32.rings[0]; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; cmd->rx_setup_32.hdr.num_rings = 1; /* FIXME: do we need all of this? 
*/ flags = 0; flags |= HTT_RX_RING_FLAGS_MAC80211_HDR; flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; flags |= HTT_RX_RING_FLAGS_PPDU_START; flags |= HTT_RX_RING_FLAGS_PPDU_END; flags |= HTT_RX_RING_FLAGS_MPDU_START; flags |= HTT_RX_RING_FLAGS_MPDU_END; flags |= HTT_RX_RING_FLAGS_MSDU_START; flags |= HTT_RX_RING_FLAGS_MSDU_END; flags |= HTT_RX_RING_FLAGS_RX_ATTENTION; flags |= HTT_RX_RING_FLAGS_FRAG_INFO; flags |= HTT_RX_RING_FLAGS_UNICAST_RX; flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; flags |= HTT_RX_RING_FLAGS_CTRL_RX; flags |= HTT_RX_RING_FLAGS_MGMT_RX; flags |= HTT_RX_RING_FLAGS_NULL_RX; flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX; fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); ring->fw_idx_shadow_reg_paddr = __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); ring->flags = __cpu_to_le16(flags); ring->fw_idx_init_val = __cpu_to_le16(fw_idx); ath10k_htt_fill_rx_desc_offset_32(hw, ring); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct ath10k_hw_params *hw = &ar->hw_params; struct sk_buff *skb; struct htt_cmd *cmd; struct htt_rx_ring_setup_ring64 *ring; const int num_rx_ring = 1; u16 flags; u32 fw_idx; int len; int ret; /* HW expects the buffer to be an integral number of 4-byte * "words" */ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr) + (sizeof(*ring) * num_rx_ring); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; ring = &cmd->rx_setup_64.rings[0]; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; cmd->rx_setup_64.hdr.num_rings = 1; flags = 0; flags |= HTT_RX_RING_FLAGS_MAC80211_HDR; flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; flags |= HTT_RX_RING_FLAGS_PPDU_START; flags |= HTT_RX_RING_FLAGS_PPDU_END; flags |= HTT_RX_RING_FLAGS_MPDU_START; flags |= HTT_RX_RING_FLAGS_MPDU_END; flags |= HTT_RX_RING_FLAGS_MSDU_START; flags |= HTT_RX_RING_FLAGS_MSDU_END; flags |= HTT_RX_RING_FLAGS_RX_ATTENTION; flags |= HTT_RX_RING_FLAGS_FRAG_INFO; flags |= HTT_RX_RING_FLAGS_UNICAST_RX; flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; flags |= HTT_RX_RING_FLAGS_CTRL_RX; flags |= HTT_RX_RING_FLAGS_MGMT_RX; flags |= HTT_RX_RING_FLAGS_NULL_RX; flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX; fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr); ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr); ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); ring->flags = __cpu_to_le16(flags); ring->fw_idx_init_val = __cpu_to_le16(fw_idx); ath10k_htt_fill_rx_desc_offset_64(hw, ring); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; struct htt_rx_ring_setup_ring32 *ring; const int num_rx_ring = 1; u16 flags; int len; int ret; /* * the HW expects the buffer to be an integral number of 4-byte * "words" */ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); 
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr) + (sizeof(*ring) * num_rx_ring); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; ring = &cmd->rx_setup_32.rings[0]; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; cmd->rx_setup_32.hdr.num_rings = 1; flags = 0; flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; flags |= HTT_RX_RING_FLAGS_UNICAST_RX; flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; memset(ring, 0, sizeof(*ring)); ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN); ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); ring->flags = __cpu_to_le16(flags); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu) { struct ath10k *ar = htt->ar; struct htt_aggr_conf *aggr_conf; struct sk_buff *skb; struct htt_cmd *cmd; int len; int ret; /* Firmware defaults are: amsdu = 3 and ampdu = 64 */ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64) return -EINVAL; if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31) return -EINVAL; len = sizeof(cmd->hdr); len += sizeof(cmd->aggr_conf); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG; aggr_conf = &cmd->aggr_conf; aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", aggr_conf->max_num_amsdu_subframes, aggr_conf->max_num_ampdu_subframes); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu) { struct ath10k *ar = htt->ar; struct htt_aggr_conf_v2 *aggr_conf; struct sk_buff *skb; struct htt_cmd *cmd; int len; int ret; /* Firmware defaults are: amsdu = 3 and ampdu = 64 */ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64) return -EINVAL; if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31) return -EINVAL; len = sizeof(cmd->hdr); len += sizeof(cmd->aggr_conf_v2); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG; aggr_conf = &cmd->aggr_conf_v2; aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", aggr_conf->max_num_amsdu_subframes, aggr_conf->max_num_ampdu_subframes); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } int ath10k_htt_tx_fetch_resp(struct ath10k *ar, __le32 token, __le16 fetch_seq_num, struct htt_tx_fetch_record *records, size_t num_records) { struct sk_buff *skb; struct htt_cmd *cmd; const u16 resp_id = 0; int len = 0; int ret; /* Response IDs are echo-ed back only for host driver convenience * purposes. They aren't used for anything in the driver yet so use 0. 
*/ len += sizeof(cmd->hdr); len += sizeof(cmd->tx_fetch_resp); len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records; skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP; cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id); cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num; cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records); cmd->tx_fetch_resp.token = token; memcpy(cmd->tx_fetch_resp.records, records, sizeof(records[0]) * num_records); ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb); if (ret) { ath10k_warn(ar, "failed to submit htc command: %d\n", ret); goto err_free_skb; } return 0; err_free_skb: dev_kfree_skb_any(skb); return ret; } static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); struct ath10k_vif *arvif; if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { return ar->scan.vdev_id; } else if (cb->vif) { arvif = (void *)cb->vif->drv_priv; return arvif->vdev_id; } else if (ar->monitor_started) { return ar->monitor_vdev_id; } else { return 0; } } static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); if (!is_eth && ieee80211_is_mgmt(hdr->frame_control)) return HTT_DATA_TX_EXT_TID_MGMT; else if (cb->flags & ATH10K_SKB_F_QOS) return skb->priority & IEEE80211_QOS_CTL_TID_MASK; else return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; } int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); int len = 0; int msdu_id = -1; int res; const u8 *peer_addr; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); if (res < 0) goto err; msdu_id = res; if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { peer_addr = hdr->addr1; if (is_multicast_ether_addr(peer_addr)) { skb_put(msdu, sizeof(struct ieee80211_mmie_16)); } else { if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP || skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) skb_put(msdu, IEEE80211_GCMP_MIC_LEN); else skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } } txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; goto err_free_msdu_id; } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; goto err_free_txdesc; } skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; memset(cmd, 0, len); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) dma_unmap_single(dev, skb_cb->paddr, 
msdu->len, DMA_TO_DEVICE); err_free_txdesc: dev_kfree_skb_any(txdesc); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err: return res; } #define HTT_TX_HL_NEEDED_HEADROOM \ (unsigned int)(sizeof(struct htt_cmd_hdr) + \ sizeof(struct htt_data_tx_desc) + \ sizeof(struct ath10k_htc_hdr)) static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; int res, data_len; struct htt_cmd_hdr *cmd_hdr; struct htt_data_tx_desc *tx_desc; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct sk_buff *tmp_skb; bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); u8 flags0 = 0; u16 flags1 = 0; u16 msdu_id = 0; if (!is_eth) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } } data_len = msdu->len; switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; fallthrough; case ATH10K_HW_TXRX_ETHERNET: flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; if (htt->disable_tx_comp) flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE; break; } if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); if (msdu->ip_summed == CHECKSUM_PARTIAL && !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; } /* Prepend the HTT header and TX desc struct to the data message * and realloc the skb if it does not have enough headroom. */ if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) { tmp_skb = msdu; ath10k_dbg(htt->ar, ATH10K_DBG_HTT, "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n", skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM); msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM); kfree_skb(tmp_skb); if (!msdu) { ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n"); res = -ENOMEM; goto out; } } if (ar->bus_param.hl_msdu_ids) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); if (res < 0) { ath10k_err(ar, "msdu_id allocation failed %d\n", res); goto out; } msdu_id = res; } /* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase * reference by one to avoid a use-after-free case and a double * free. 
*/ skb_get(msdu); skb_push(msdu, sizeof(*cmd_hdr)); skb_push(msdu, sizeof(*tx_desc)); cmd_hdr = (struct htt_cmd_hdr *)msdu->data; tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr)); cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM; tx_desc->flags0 = flags0; tx_desc->flags1 = __cpu_to_le16(flags1); tx_desc->len = __cpu_to_le16(data_len); tx_desc->id = __cpu_to_le16(msdu_id); tx_desc->frags_paddr = 0; /* always zero */ /* Initialize peer_id to INVALID_PEER because this is NOT * Reinjection path */ tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID); res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu); out: return res; } static int ath10k_htt_tx_32(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; struct ath10k_htt_txbuf_32 *txbuf; struct htt_data_tx_desc_frag *frags; bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; u16 freq = 0; u32 frags_paddr = 0; u32 txbuf_paddr; struct htt_msdu_ext_desc *ext_desc = NULL; struct htt_msdu_ext_desc *ext_desc_t = NULL; res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); if (res < 0) goto err; msdu_id = res; prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id; txbuf_paddr = htt->txbuf.paddr + (sizeof(struct ath10k_htt_txbuf_32) * msdu_id); if (!is_eth) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && txmode == ATH10K_HW_TXRX_RAW && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; goto err_free_msdu_id; } if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) freq = ar->scan.roc_freq; switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; fallthrough; case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { ext_desc_t = htt->frag_desc.vaddr_desc_32; memset(&ext_desc_t[msdu_id], 0, sizeof(struct htt_msdu_ext_desc)); frags = (struct htt_data_tx_desc_frag *) &ext_desc_t[msdu_id].frags; ext_desc = &ext_desc_t[msdu_id]; frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr); frags[0].tword_addr.paddr_hi = 0; frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); frags_paddr = htt->frag_desc.paddr + (sizeof(struct htt_msdu_ext_desc) * msdu_id); } else { frags = txbuf->frags; frags[0].dword_addr.paddr = __cpu_to_le32(skb_cb->paddr); frags[0].dword_addr.len = __cpu_to_le32(msdu->len); frags[1].dword_addr.paddr = 0; frags[1].dword_addr.len = 0; frags_paddr = txbuf_paddr; } flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; frags_paddr = skb_cb->paddr; break; } 
/* Normally all commands go through HTC which manages tx credits for * each endpoint and notifies when tx is completed. * * HTT endpoint is creditless so there's no need to care about HTC * flags. In that case it is trivial to fill the HTC header here. * * MSDU transmission is considered completed upon HTT event. This * implies no relevant resources can be freed until after the event is * received. That's why HTC tx completion handler itself is ignored by * setting NULL to transfer_context for all sg items. * * There is simply no point in pushing HTT TX_FRM through HTC tx path * as it's a waste of resources. By bypassing HTC it is possible to * avoid extra memory allocations, compress data structures and thus * improve performance. */ txbuf->htc_hdr.eid = htt->eid; txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) + sizeof(txbuf->cmd_tx) + prefetch_len); txbuf->htc_hdr.flags = 0; if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); if (msdu->ip_summed == CHECKSUM_PARTIAL && !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; if (ar->hw_params.continuous_frag_desc) ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; } /* Prevent firmware from sending up tx inspection requests. There's * nothing ath10k can do with frames requested for inspection so force * it to simply rely a regular tx completion with discard status. */ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; txbuf->cmd_tx.flags0 = flags0; txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); if (ath10k_mac_tx_frm_has_freq(ar)) { txbuf->cmd_tx.offchan_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); txbuf->cmd_tx.offchan_tx.freq = __cpu_to_le16(freq); } else { txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); } trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n", flags0, flags1, msdu->len, msdu_id, &frags_paddr, &skb_cb->paddr, vdev_id, tid, freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); trace_ath10k_tx_payload(ar, msdu->data, msdu->len); sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; sg_items[0].vaddr = &txbuf->htc_hdr; sg_items[0].paddr = txbuf_paddr + sizeof(txbuf->frags); sg_items[0].len = sizeof(txbuf->htc_hdr) + sizeof(txbuf->cmd_hdr) + sizeof(txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; sg_items[1].vaddr = msdu->data; sg_items[1].paddr = skb_cb->paddr; sg_items[1].len = prefetch_len; res = ath10k_hif_tx_sg(htt->ar, htt->ar->htc.endpoint[htt->eid].ul_pipe_id, sg_items, ARRAY_SIZE(sg_items)); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err: return res; } static int ath10k_htt_tx_64(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev 
= ar->dev; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; struct ath10k_htt_txbuf_64 *txbuf; struct htt_data_tx_desc_frag *frags; bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; u16 freq = 0; dma_addr_t frags_paddr = 0; dma_addr_t txbuf_paddr; struct htt_msdu_ext_desc_64 *ext_desc = NULL; struct htt_msdu_ext_desc_64 *ext_desc_t = NULL; res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); if (res < 0) goto err; msdu_id = res; prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id; txbuf_paddr = htt->txbuf.paddr + (sizeof(struct ath10k_htt_txbuf_64) * msdu_id); if (!is_eth) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && txmode == ATH10K_HW_TXRX_RAW && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; goto err_free_msdu_id; } if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) freq = ar->scan.roc_freq; switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; fallthrough; case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { ext_desc_t = htt->frag_desc.vaddr_desc_64; memset(&ext_desc_t[msdu_id], 0, sizeof(struct htt_msdu_ext_desc_64)); frags = (struct htt_data_tx_desc_frag *) &ext_desc_t[msdu_id].frags; ext_desc = &ext_desc_t[msdu_id]; frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr); frags[0].tword_addr.paddr_hi = __cpu_to_le16(upper_32_bits(skb_cb->paddr)); frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); frags_paddr = htt->frag_desc.paddr + (sizeof(struct htt_msdu_ext_desc_64) * msdu_id); } else { frags = txbuf->frags; frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr); frags[0].tword_addr.paddr_hi = __cpu_to_le16(upper_32_bits(skb_cb->paddr)); frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); frags[1].tword_addr.paddr_lo = 0; frags[1].tword_addr.paddr_hi = 0; frags[1].tword_addr.len_16 = 0; } flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; frags_paddr = skb_cb->paddr; break; } /* Normally all commands go through HTC which manages tx credits for * each endpoint and notifies when tx is completed. * * HTT endpoint is creditless so there's no need to care about HTC * flags. In that case it is trivial to fill the HTC header here. * * MSDU transmission is considered completed upon HTT event. This * implies no relevant resources can be freed until after the event is * received. That's why HTC tx completion handler itself is ignored by * setting NULL to transfer_context for all sg items. * * There is simply no point in pushing HTT TX_FRM through HTC tx path * as it's a waste of resources. 
By bypassing HTC it is possible to * avoid extra memory allocations, compress data structures and thus * improve performance. */ txbuf->htc_hdr.eid = htt->eid; txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) + sizeof(txbuf->cmd_tx) + prefetch_len); txbuf->htc_hdr.flags = 0; if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); if (msdu->ip_summed == CHECKSUM_PARTIAL && !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; if (ar->hw_params.continuous_frag_desc) { memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag)); ext_desc->tso_flag[3] |= __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64); } } /* Prevent firmware from sending up tx inspection requests. There's * nothing ath10k can do with frames requested for inspection so force * it to simply rely a regular tx completion with discard status. */ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; txbuf->cmd_tx.flags0 = flags0; txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); /* fill fragment descriptor */ txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr); if (ath10k_mac_tx_frm_has_freq(ar)) { txbuf->cmd_tx.offchan_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); txbuf->cmd_tx.offchan_tx.freq = __cpu_to_le16(freq); } else { txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); } trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n", flags0, flags1, msdu->len, msdu_id, &frags_paddr, &skb_cb->paddr, vdev_id, tid, freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); trace_ath10k_tx_payload(ar, msdu->data, msdu->len); sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; sg_items[0].vaddr = &txbuf->htc_hdr; sg_items[0].paddr = txbuf_paddr + sizeof(txbuf->frags); sg_items[0].len = sizeof(txbuf->htc_hdr) + sizeof(txbuf->cmd_hdr) + sizeof(txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; sg_items[1].vaddr = msdu->data; sg_items[1].paddr = skb_cb->paddr; sg_items[1].len = prefetch_len; res = ath10k_hif_tx_sg(htt->ar, htt->ar->htc.endpoint[htt->eid].ul_pipe_id, sg_items, ARRAY_SIZE(sg_items)); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err: return res; } static const struct ath10k_htt_tx_ops htt_tx_ops_32 = { .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32, .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32, .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32, .htt_tx = ath10k_htt_tx_32, .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32, .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32, .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32, }; static const struct ath10k_htt_tx_ops htt_tx_ops_64 = { .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64, .htt_send_frag_desc_bank_cfg = 
ath10k_htt_send_frag_desc_bank_cfg_64, .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64, .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64, .htt_tx = ath10k_htt_tx_64, .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64, .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64, .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2, }; static const struct ath10k_htt_tx_ops htt_tx_ops_hl = { .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl, .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32, .htt_tx = ath10k_htt_tx_hl, .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32, .htt_flush_tx = ath10k_htt_flush_tx_queue, }; void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) htt->tx_ops = &htt_tx_ops_hl; else if (ar->hw_params.target_64bit) htt->tx_ops = &htt_tx_ops_64; else htt->tx_ops = &htt_tx_ops_32; }
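/*
 * Editor's illustration (not part of the driver): a standalone userspace
 * sketch of the one-byte queue-depth encoding implemented by
 * ath10k_htt_tx_txq_calc_size() above. The field split (6-bit factor,
 * 2-bit exponent) is inferred from the code itself -- factor saturates at
 * 64 and exp at 4 -- and from the HTT_TX_Q_STATE_ENTRY_* masks as I read
 * them in htt.h; treat the exact constants as assumptions. Depth is kept
 * in 128-byte units and each exponent step scales the unit by 8, so a
 * single byte covers queues from 0 up to roughly 63 << 16 bytes (~4 MB)
 * before saturating to 0xff.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ENTRY_FACTOR_MASK 0x3f /* assumed: low 6 bits hold the factor */
#define ENTRY_EXP_LSB     6    /* assumed: top 2 bits hold the exponent */

static uint8_t calc_size(size_t count)
{
	int exp = 0;
	int factor = count >> 7;	/* depth in 128-byte units */

	while (factor >= 64 && exp < 4) {
		factor >>= 3;		/* each exponent step is x8 */
		exp++;
	}
	if (exp == 4)
		return 0xff;		/* saturation marker: too deep to encode */
	if (count > 0 && factor == 0)
		factor = 1;		/* non-empty queues never encode as zero */
	return (uint8_t)((exp << ENTRY_EXP_LSB) | (factor & ENTRY_FACTOR_MASK));
}

static size_t approx_decode(uint8_t v)
{
	/* Lossy inverse; 0xff decodes to the maximum representable depth. */
	int exp = v >> ENTRY_EXP_LSB;
	int factor = v & ENTRY_FACTOR_MASK;

	return (size_t)factor << (7 + 3 * exp);
}

int main(void)
{
	size_t samples[] = { 0, 100, 4096, 65536, 1 << 20, 1 << 24 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint8_t v = calc_size(samples[i]);

		printf("%8zu bytes -> 0x%02x (~%zu bytes)\n",
		       samples[i], v, approx_decode(v));
	}
	return 0;
}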
#undef TRACE_SYSTEM #define TRACE_SYSTEM irq_matrix #if !defined(_TRACE_IRQ_MATRIX_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_IRQ_MATRIX_H #include <linux/tracepoint.h> struct irq_matrix; struct cpumap; DECLARE_EVENT_CLASS(irq_matrix_global, TP_PROTO(struct irq_matrix *matrix), TP_ARGS(matrix), TP_STRUCT__entry( __field( unsigned int, online_maps ) __field( unsigned int, global_available ) __field( unsigned int, global_reserved ) __field( unsigned int, total_allocated ) ), TP_fast_assign( __entry->online_maps = matrix->online_maps; __entry->global_available = matrix->global_available; __entry->global_reserved = matrix->global_reserved; __entry->total_allocated = matrix->total_allocated; ), TP_printk("online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u", __entry->online_maps, __entry->global_available, __entry->global_reserved, __entry->total_allocated) ); DECLARE_EVENT_CLASS(irq_matrix_global_update, TP_PROTO(int bit, struct irq_matrix *matrix), TP_ARGS(bit, matrix), TP_STRUCT__entry( __field( int, bit ) __field( unsigned int, online_maps ) __field( unsigned int, global_available ) __field( unsigned int, global_reserved ) __field( unsigned int, total_allocated ) ), TP_fast_assign( __entry->bit = bit; __entry->online_maps = matrix->online_maps; __entry->global_available = matrix->global_available; __entry->global_reserved = matrix->global_reserved; __entry->total_allocated = matrix->total_allocated; ), TP_printk("bit=%d online_maps=%d global_avl=%u, global_rsvd=%u, total_alloc=%u", __entry->bit, __entry->online_maps, __entry->global_available, __entry->global_reserved, __entry->total_allocated) ); DECLARE_EVENT_CLASS(irq_matrix_cpu, TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix, struct cpumap *cmap), TP_ARGS(bit, cpu, matrix, cmap), TP_STRUCT__entry( __field( int, bit ) __field( unsigned int, cpu ) __field( bool, online ) __field( unsigned int, available ) __field( unsigned int, allocated ) __field( unsigned int, managed ) __field( unsigned int, online_maps ) __field( unsigned int, global_available ) __field( unsigned int, global_reserved ) __field( unsigned int, total_allocated ) ), TP_fast_assign( __entry->bit = bit; __entry->cpu = cpu; __entry->online = cmap->online; __entry->available = cmap->available; __entry->allocated = cmap->allocated; __entry->managed = cmap->managed; __entry->online_maps = matrix->online_maps; __entry->global_available = matrix->global_available; __entry->global_reserved = matrix->global_reserved; __entry->total_allocated = matrix->total_allocated; ), TP_printk("bit=%d cpu=%u online=%d avl=%u alloc=%u managed=%u online_maps=%u global_avl=%u, global_rsvd=%u, total_alloc=%u", __entry->bit, __entry->cpu, __entry->online, __entry->available, __entry->allocated, __entry->managed, __entry->online_maps, __entry->global_available, __entry->global_reserved,
__entry->total_allocated) ); DEFINE_EVENT(irq_matrix_global, irq_matrix_online, TP_PROTO(struct irq_matrix *matrix), TP_ARGS(matrix) ); DEFINE_EVENT(irq_matrix_global, irq_matrix_offline, TP_PROTO(struct irq_matrix *matrix), TP_ARGS(matrix) ); DEFINE_EVENT(irq_matrix_global, irq_matrix_reserve, TP_PROTO(struct irq_matrix *matrix), TP_ARGS(matrix) ); DEFINE_EVENT(irq_matrix_global, irq_matrix_remove_reserved, TP_PROTO(struct irq_matrix *matrix), TP_ARGS(matrix) ); DEFINE_EVENT(irq_matrix_global_update, irq_matrix_assign_system, TP_PROTO(int bit, struct irq_matrix *matrix), TP_ARGS(bit, matrix) ); DEFINE_EVENT(irq_matrix_cpu, irq_matrix_reserve_managed, TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix, struct cpumap *cmap), TP_ARGS(bit, cpu, matrix, cmap) ); DEFINE_EVENT(irq_matrix_cpu, irq_matrix_remove_managed, TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix, struct cpumap *cmap), TP_ARGS(bit, cpu, matrix, cmap) ); DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc_managed, TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix, struct cpumap *cmap), TP_ARGS(bit, cpu, matrix, cmap) ); DEFINE_EVENT(irq_matrix_cpu, irq_matrix_assign, TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix, struct cpumap *cmap), TP_ARGS(bit, cpu, matrix, cmap) ); DEFINE_EVENT(irq_matrix_cpu, irq_matrix_alloc, TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix, struct cpumap *cmap), TP_ARGS(bit, cpu, matrix, cmap) ); DEFINE_EVENT(irq_matrix_cpu, irq_matrix_free, TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix, struct cpumap *cmap), TP_ARGS(bit, cpu, matrix, cmap) ); #endif /* _TRACE_IRQ_MATRIX_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
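/*
 * Editor's note (illustration only): DECLARE_EVENT_CLASS() above defines a
 * record layout once, and every DEFINE_EVENT() stamps out a trace_<name>()
 * entry point sharing that layout. A new tracepoint reusing the
 * irq_matrix_cpu class would therefore only need the lines below --
 * "irq_matrix_example" is a hypothetical name, not part of this header.
 */
DEFINE_EVENT(irq_matrix_cpu, irq_matrix_example,

	TP_PROTO(int bit, unsigned int cpu, struct irq_matrix *matrix,
		 struct cpumap *cmap),

	TP_ARGS(bit, cpu, matrix, cmap)
);

/*
 * The caller side then fires the generated stub, e.g.:
 *
 *	trace_irq_matrix_example(bit, cpu, matrix, cmap);
 *
 * which compiles down to a static-branch no-op unless the event is enabled.
 */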
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * pm_wakeup.h - Power management wakeup interface * * Copyright (C) 2008 Alan Stern * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc. */ #ifndef _LINUX_PM_WAKEUP_H #define _LINUX_PM_WAKEUP_H #ifndef _DEVICE_H_ # error "Please do not include this file directly." #endif #include <linux/types.h> struct wake_irq; /** * struct wakeup_source - Representation of wakeup sources * * @name: Name of the wakeup source * @id: Wakeup source id * @entry: Wakeup source list entry * @lock: Wakeup source lock * @wakeirq: Optional device specific wakeirq * @timer: Wakeup timer list * @timer_expires: Wakeup timer expiration * @total_time: Total time this wakeup source has been active. * @max_time: Maximum time this wakeup source has been continuously active. * @last_time: Monotonic clock when the wakeup source was last touched. * @start_prevent_time: Point in time when @prevent_sleep_time accounting started. * @prevent_sleep_time: Total time this source has been preventing autosleep. * @event_count: Number of signaled wakeup events. * @active_count: Number of times the wakeup source was activated. * @relax_count: Number of times the wakeup source was deactivated. * @expire_count: Number of times the wakeup source's timeout has expired. * @wakeup_count: Number of times the wakeup source might abort suspend. * @dev: Struct device for sysfs statistics about the wakeup source. * @active: Status of the wakeup source. * @autosleep_enabled: Autosleep is active, so update @prevent_sleep_time. */ struct wakeup_source { const char *name; int id; struct list_head entry; spinlock_t lock; struct wake_irq *wakeirq; struct timer_list timer; unsigned long timer_expires; ktime_t total_time; ktime_t max_time; ktime_t last_time; ktime_t start_prevent_time; ktime_t prevent_sleep_time; unsigned long event_count; unsigned long active_count; unsigned long relax_count; unsigned long expire_count; unsigned long wakeup_count; struct device *dev; bool active:1; bool autosleep_enabled:1; }; #define for_each_wakeup_source(ws) \ for ((ws) = wakeup_sources_walk_start(); \ (ws); \ (ws) = wakeup_sources_walk_next((ws))) #ifdef CONFIG_PM_SLEEP /* * Changes to device_may_wakeup take effect on the next pm state change.
*/ static inline bool device_can_wakeup(struct device *dev) { return dev->power.can_wakeup; } static inline bool device_may_wakeup(struct device *dev) { return dev->power.can_wakeup && !!dev->power.wakeup; } static inline bool device_wakeup_path(struct device *dev) { return dev->power.wakeup_path; } static inline void device_set_wakeup_path(struct device *dev) { dev->power.wakeup_path = true; } static inline void device_set_out_band_wakeup(struct device *dev) { dev->power.out_band_wakeup = true; } static inline bool device_out_band_wakeup(struct device *dev) { return dev->power.out_band_wakeup; } /* drivers/base/power/wakeup.c */ extern struct wakeup_source *wakeup_source_register(struct device *dev, const char *name); extern void wakeup_source_unregister(struct wakeup_source *ws); extern int wakeup_sources_read_lock(void); extern void wakeup_sources_read_unlock(int idx); extern struct wakeup_source *wakeup_sources_walk_start(void); extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); extern void device_wakeup_disable(struct device *dev); extern void device_set_wakeup_capable(struct device *dev, bool capable); extern int device_set_wakeup_enable(struct device *dev, bool enable); extern void __pm_stay_awake(struct wakeup_source *ws); extern void pm_stay_awake(struct device *dev); extern void __pm_relax(struct wakeup_source *ws); extern void pm_relax(struct device *dev); extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard); extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard); #else /* !CONFIG_PM_SLEEP */ static inline void device_set_wakeup_capable(struct device *dev, bool capable) { dev->power.can_wakeup = capable; } static inline bool device_can_wakeup(struct device *dev) { return dev->power.can_wakeup; } static inline struct wakeup_source *wakeup_source_register(struct device *dev, const char *name) { return NULL; } static inline void wakeup_source_unregister(struct wakeup_source *ws) {} static inline int device_wakeup_enable(struct device *dev) { dev->power.should_wakeup = true; return 0; } static inline void device_wakeup_disable(struct device *dev) { dev->power.should_wakeup = false; } static inline int device_set_wakeup_enable(struct device *dev, bool enable) { dev->power.should_wakeup = enable; return 0; } static inline bool device_may_wakeup(struct device *dev) { return dev->power.can_wakeup && dev->power.should_wakeup; } static inline bool device_wakeup_path(struct device *dev) { return false; } static inline void device_set_wakeup_path(struct device *dev) {} static inline void device_set_out_band_wakeup(struct device *dev) {} static inline bool device_out_band_wakeup(struct device *dev) { return false; } static inline void __pm_stay_awake(struct wakeup_source *ws) {} static inline void pm_stay_awake(struct device *dev) {} static inline void __pm_relax(struct wakeup_source *ws) {} static inline void pm_relax(struct device *dev) {} static inline void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) {} static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) {} #endif /* !CONFIG_PM_SLEEP */ static inline bool device_awake_path(struct device *dev) { return device_wakeup_path(dev); } static inline void device_set_awake_path(struct device *dev) { device_set_wakeup_path(dev); } static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) { pm_wakeup_ws_event(ws, msec, 
false); } static inline void pm_wakeup_event(struct device *dev, unsigned int msec) { pm_wakeup_dev_event(dev, msec, false); } static inline void pm_wakeup_hard_event(struct device *dev) { pm_wakeup_dev_event(dev, 0, true); } /** * device_init_wakeup - Device wakeup initialization. * @dev: Device to handle. * @enable: Whether or not to enable @dev as a wakeup device. * * By default, most devices should leave wakeup disabled. The exceptions are * devices that everyone expects to be wakeup sources: keyboards, power buttons, * possibly network interfaces, etc. Also, devices that don't generate their * own wakeup requests but merely forward requests from one bus to another * (like PCI bridges) should have wakeup enabled by default. */ static inline int device_init_wakeup(struct device *dev, bool enable) { if (enable) { device_set_wakeup_capable(dev, true); return device_wakeup_enable(dev); } device_wakeup_disable(dev); device_set_wakeup_capable(dev, false); return 0; } static void device_disable_wakeup(void *dev) { device_init_wakeup(dev, false); } /** * devm_device_init_wakeup - Resource managed device wakeup initialization. * @dev: Device to handle. * * This function is the devm managed version of device_init_wakeup(dev, true). */ static inline int devm_device_init_wakeup(struct device *dev) { device_init_wakeup(dev, true); return devm_add_action_or_reset(dev, device_disable_wakeup, dev); } #endif /* _LINUX_PM_WAKEUP_H */
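/*
 * Editor's illustration (not part of this header): typical consumer-side
 * usage of the wakeup API declared above. Names prefixed "foo_" are
 * hypothetical. The sketch assumes a platform driver whose interrupt can
 * wake the system; note that pm_wakeup.h itself is pulled in via
 * <linux/device.h> and must not be included directly.
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_wake_irq(int irq, void *data)
{
	struct device *dev = data;

	/* Report a wakeup event; msec = 0 requests no extra grace period. */
	pm_wakeup_event(dev, 0);
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Mark the device wakeup-capable and enable wakeup in one step; the
	 * devm_ variant also disables wakeup automatically on driver unbind.
	 */
	return devm_device_init_wakeup(&pdev->dev);
}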
/* SPDX-License-Identifier: GPL-2.0+ */ #ifndef _LINUX_OF_H #define _LINUX_OF_H /* * Definitions for talking to the Open Firmware PROM on * Power Macintosh and other computers. * * Copyright (C) 1996-2005 Paul Mackerras. * * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. * Updates for SPARC64 by David S. Miller * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. */ #include <linux/types.h> #include <linux/bitops.h> #include <linux/cleanup.h> #include <linux/errno.h> #include <linux/kobject.h> #include <linux/mod_devicetable.h> #include <linux/property.h> #include <linux/list.h> #include <asm/byteorder.h> typedef u32 phandle; typedef u32 ihandle; struct property { char *name; int length; void *value; struct property *next; #if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC) unsigned long _flags; #endif #if defined(CONFIG_OF_PROMTREE) unsigned int unique_id; #endif #if defined(CONFIG_OF_KOBJ) struct bin_attribute attr; #endif }; #if defined(CONFIG_SPARC) struct of_irq_controller; #endif struct device_node { const char *name; phandle phandle; const char *full_name; struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; /* removed properties */ struct device_node *parent; struct device_node *child; struct device_node *sibling; #if defined(CONFIG_OF_KOBJ) struct kobject kobj; #endif unsigned long _flags; void *data; #if defined(CONFIG_SPARC) unsigned int unique_id; struct of_irq_controller *irq_trans; #endif }; #define MAX_PHANDLE_ARGS NR_FWNODE_REFERENCE_ARGS struct of_phandle_args { struct device_node *np; int args_count; uint32_t args[MAX_PHANDLE_ARGS]; }; struct of_phandle_iterator { /* Common iterator information */ const char *cells_name; int cell_count; const struct device_node *parent; /* List size information */ const __be32 *list_end; const __be32 *phandle_end; /* Current position state */ const __be32 *cur; uint32_t cur_count; phandle phandle; struct device_node *node; }; struct of_reconfig_data { struct device_node *dn; struct property *prop; struct property *old_prop; }; extern const struct kobj_type of_node_ktype; extern const struct fwnode_operations of_fwnode_ops; /** * of_node_init - initialize a devicetree node * @node: Pointer to device node that has been created by kzalloc() * * On return the device_node refcount is set to one. Use of_node_put() * on @node when done to free the memory allocated for it. If the node * is NOT a dynamic node the memory will not be freed. The decision of * whether to free the memory will be done by node->release(), which is * of_node_release().
*/ static inline void of_node_init(struct device_node *node) { #if defined(CONFIG_OF_KOBJ) kobject_init(&node->kobj, &of_node_ktype); #endif fwnode_init(&node->fwnode, &of_fwnode_ops); } #if defined(CONFIG_OF_KOBJ) #define of_node_kobj(n) (&(n)->kobj) #else #define of_node_kobj(n) NULL #endif #ifdef CONFIG_OF_DYNAMIC extern struct device_node *of_node_get(struct device_node *node); extern void of_node_put(struct device_node *node); #else /* CONFIG_OF_DYNAMIC */ /* Dummy ref counting routines - to be implemented later */ static inline struct device_node *of_node_get(struct device_node *node) { return node; } static inline void of_node_put(struct device_node *node) { } #endif /* !CONFIG_OF_DYNAMIC */ DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T)) /* Pointer for first entry in chain of all nodes. */ extern struct device_node *of_root; extern struct device_node *of_chosen; extern struct device_node *of_aliases; extern struct device_node *of_stdout; /* * struct device_node flag descriptions * (need to be visible even when !CONFIG_OF) */ #define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */ #define OF_DETACHED 2 /* detached from the device tree */ #define OF_POPULATED 3 /* device already created */ #define OF_POPULATED_BUS 4 /* platform bus created for children */ #define OF_OVERLAY 5 /* allocated for an overlay */ #define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */ #define OF_BAD_ADDR ((u64)-1) #ifdef CONFIG_OF void of_core_init(void); static inline bool is_of_node(const struct fwnode_handle *fwnode) { return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &of_fwnode_ops; } #define to_of_node(__fwnode) \ ({ \ typeof(__fwnode) __to_of_node_fwnode = (__fwnode); \ \ is_of_node(__to_of_node_fwnode) ? \ container_of(__to_of_node_fwnode, \ struct device_node, fwnode) : \ NULL; \ }) #define of_fwnode_handle(node) \ ({ \ typeof(node) __of_fwnode_handle_node = (node); \ \ __of_fwnode_handle_node ? 
\ &__of_fwnode_handle_node->fwnode : NULL; \ }) static inline bool of_node_is_root(const struct device_node *node) { return node && (node->parent == NULL); } static inline int of_node_check_flag(const struct device_node *n, unsigned long flag) { return test_bit(flag, &n->_flags); } static inline int of_node_test_and_set_flag(struct device_node *n, unsigned long flag) { return test_and_set_bit(flag, &n->_flags); } static inline void of_node_set_flag(struct device_node *n, unsigned long flag) { set_bit(flag, &n->_flags); } static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) { clear_bit(flag, &n->_flags); } #if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC) static inline int of_property_check_flag(const struct property *p, unsigned long flag) { return test_bit(flag, &p->_flags); } static inline void of_property_set_flag(struct property *p, unsigned long flag) { set_bit(flag, &p->_flags); } static inline void of_property_clear_flag(struct property *p, unsigned long flag) { clear_bit(flag, &p->_flags); } #endif extern struct device_node *__of_find_all_nodes(struct device_node *prev); extern struct device_node *of_find_all_nodes(struct device_node *prev); /* * OF address retrieval & translation */ /* Helper to read a big number; size is in cells (not bytes) */ static inline u64 of_read_number(const __be32 *cell, int size) { u64 r = 0; for (; size--; cell++) r = (r << 32) | be32_to_cpu(*cell); return r; } /* Like of_read_number, but we want an unsigned long result */ static inline unsigned long of_read_ulong(const __be32 *cell, int size) { /* toss away upper bits if unsigned long is smaller than u64 */ return of_read_number(cell, size); } #if defined(CONFIG_SPARC) #include <asm/prom.h> #endif #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) extern bool of_node_name_eq(const struct device_node *np, const char *name); extern bool of_node_name_prefix(const struct device_node *np, const char *prefix); static inline const char *of_node_full_name(const struct device_node *np) { return np ? 
np->full_name : "<no-node>"; } #define for_each_of_allnodes_from(from, dn) \ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) #define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); extern struct device_node *of_find_node_by_type(struct device_node *from, const char *type); extern struct device_node *of_find_compatible_node(struct device_node *from, const char *type, const char *compat); extern struct device_node *of_find_matching_node_and_match( struct device_node *from, const struct of_device_id *matches, const struct of_device_id **match); extern struct device_node *of_find_node_opts_by_path(const char *path, const char **opts); static inline struct device_node *of_find_node_by_path(const char *path) { return of_find_node_opts_by_path(path, NULL); } extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_parent(struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); extern struct device_node *of_get_next_child_with_prefix(const struct device_node *node, struct device_node *prev, const char *prefix); extern struct device_node *of_get_next_available_child( const struct device_node *node, struct device_node *prev); extern struct device_node *of_get_next_reserved_child( const struct device_node *node, struct device_node *prev); extern struct device_node *of_get_compatible_child(const struct device_node *parent, const char *compatible); extern struct device_node *of_get_child_by_name(const struct device_node *node, const char *name); extern struct device_node *of_get_available_child_by_name(const struct device_node *node, const char *name); /* cache lookup */ extern struct device_node *of_find_next_cache_node(const struct device_node *); extern int of_find_last_cache_level(unsigned int cpu); extern struct device_node *of_find_node_with_property( struct device_node *from, const char *prop_name); extern struct property *of_find_property(const struct device_node *np, const char *name, int *lenp); extern bool of_property_read_bool(const struct device_node *np, const char *propname); extern int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size); extern int of_property_read_u8_index(const struct device_node *np, const char *propname, u32 index, u8 *out_value); extern int of_property_read_u16_index(const struct device_node *np, const char *propname, u32 index, u16 *out_value); extern int of_property_read_u32_index(const struct device_node *np, const char *propname, u32 index, u32 *out_value); extern int of_property_read_u64_index(const struct device_node *np, const char *propname, u32 index, u64 *out_value); extern int of_property_read_variable_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz_min, size_t sz_max); extern int of_property_read_variable_u16_array(const struct device_node *np, const char *propname, u16 *out_values, size_t sz_min, size_t sz_max); extern int of_property_read_variable_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz_min, size_t sz_max); extern int of_property_read_u64(const struct device_node *np, const char *propname, u64 *out_value); extern int of_property_read_variable_u64_array(const struct device_node *np, const char 
*propname, u64 *out_values, size_t sz_min, size_t sz_max); extern int of_property_read_string(const struct device_node *np, const char *propname, const char **out_string); extern int of_property_match_string(const struct device_node *np, const char *propname, const char *string); extern int of_property_read_string_helper(const struct device_node *np, const char *propname, const char **out_strs, size_t sz, int index); extern int of_device_is_compatible(const struct device_node *device, const char *); extern int of_device_compatible_match(const struct device_node *device, const char *const *compat); extern bool of_device_is_available(const struct device_node *device); extern bool of_device_is_big_endian(const struct device_node *device); extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); extern struct device_node *of_cpu_device_node_get(int cpu); extern int of_cpu_node_to_id(struct device_node *np); extern struct device_node *of_get_next_cpu_node(struct device_node *prev); extern struct device_node *of_get_cpu_state_node(const struct device_node *cpu_node, int index); extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread); extern int of_n_addr_cells(struct device_node *np); extern int of_n_size_cells(struct device_node *np); extern const struct of_device_id *of_match_node( const struct of_device_id *matches, const struct device_node *node); extern const void *of_device_get_match_data(const struct device *dev); extern int of_alias_from_compatible(const struct device_node *node, char *alias, int len); extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args); extern int __of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int cell_count, int index, struct of_phandle_args *out_args); extern int of_parse_phandle_with_args_map(const struct device_node *np, const char *list_name, const char *stem_name, int index, struct of_phandle_args *out_args); extern int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name); /* module functions */ extern ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len); extern int of_request_module(const struct device_node *np); /* phandle iterator functions */ extern int of_phandle_iterator_init(struct of_phandle_iterator *it, const struct device_node *np, const char *list_name, const char *cells_name, int cell_count); extern int of_phandle_iterator_next(struct of_phandle_iterator *it); extern int of_phandle_iterator_args(struct of_phandle_iterator *it, uint32_t *args, int size); extern int of_alias_get_id(const struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); bool of_machine_compatible_match(const char *const *compats); bool of_machine_device_match(const struct of_device_id *matches); const void *of_machine_get_match_data(const struct of_device_id *matches); /** * of_machine_is_compatible - Test root of device tree for a given compatible value * @compat: compatible string to look for in root node's compatible property. * * Return: true if the root node has the given value in its compatible property. 
*/ static inline bool of_machine_is_compatible(const char *compat) { const char *compats[] = { compat, NULL }; return of_machine_compatible_match(compats); } extern int of_add_property(struct device_node *np, struct property *prop); extern int of_remove_property(struct device_node *np, struct property *prop); extern int of_update_property(struct device_node *np, struct property *newprop); /* For updating the device tree at runtime */ #define OF_RECONFIG_ATTACH_NODE 0x0001 #define OF_RECONFIG_DETACH_NODE 0x0002 #define OF_RECONFIG_ADD_PROPERTY 0x0003 #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 extern int of_attach_node(struct device_node *); extern int of_detach_node(struct device_node *); #define of_match_ptr(_ptr) (_ptr) /* * u32 u; * * of_property_for_each_u32(np, "propname", u) * printk("U32 value: %x\n", u); */ const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur, u32 *pu); /* * struct property *prop; * const char *s; * * of_property_for_each_string(np, "propname", prop, s) * printk("String value: %s\n", s); */ const char *of_prop_next_string(const struct property *prop, const char *cur); bool of_console_check(const struct device_node *dn, char *name, int index); int of_map_id(const struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out); phys_addr_t of_dma_get_max_cpu_address(struct device_node *np); struct kimage; void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline, size_t extra_fdt_size); #else /* CONFIG_OF */ static inline void of_core_init(void) { } static inline bool is_of_node(const struct fwnode_handle *fwnode) { return false; } static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode) { return NULL; } static inline bool of_node_name_eq(const struct device_node *np, const char *name) { return false; } static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix) { return false; } static inline const char* of_node_full_name(const struct device_node *np) { return "<no-node>"; } static inline struct device_node *of_find_node_by_name(struct device_node *from, const char *name) { return NULL; } static inline struct device_node *of_find_node_by_type(struct device_node *from, const char *type) { return NULL; } static inline struct device_node *of_find_matching_node_and_match( struct device_node *from, const struct of_device_id *matches, const struct of_device_id **match) { return NULL; } static inline struct device_node *of_find_node_by_path(const char *path) { return NULL; } static inline struct device_node *of_find_node_opts_by_path(const char *path, const char **opts) { return NULL; } static inline struct device_node *of_find_node_by_phandle(phandle handle) { return NULL; } static inline struct device_node *of_get_parent(const struct device_node *node) { return NULL; } static inline struct device_node *of_get_next_parent(struct device_node *node) { return NULL; } static inline struct device_node *of_get_next_child( const struct device_node *node, struct device_node *prev) { return NULL; } static inline struct device_node *of_get_next_child_with_prefix( const struct device_node *node, struct device_node *prev, const char *prefix) { return NULL; } static inline struct device_node *of_get_next_available_child( const struct device_node *node, struct device_node *prev) { return NULL; } static inline struct device_node 
*of_get_next_reserved_child( const struct device_node *node, struct device_node *prev) { return NULL; } static inline struct device_node *of_find_node_with_property( struct device_node *from, const char *prop_name) { return NULL; } #define of_fwnode_handle(node) NULL static inline struct device_node *of_get_compatible_child(const struct device_node *parent, const char *compatible) { return NULL; } static inline struct device_node *of_get_child_by_name( const struct device_node *node, const char *name) { return NULL; } static inline struct device_node *of_get_available_child_by_name( const struct device_node *node, const char *name) { return NULL; } static inline int of_device_is_compatible(const struct device_node *device, const char *name) { return 0; } static inline int of_device_compatible_match(const struct device_node *device, const char *const *compat) { return 0; } static inline bool of_device_is_available(const struct device_node *device) { return false; } static inline bool of_device_is_big_endian(const struct device_node *device) { return false; } static inline struct property *of_find_property(const struct device_node *np, const char *name, int *lenp) { return NULL; } static inline struct device_node *of_find_compatible_node( struct device_node *from, const char *type, const char *compat) { return NULL; } static inline bool of_property_read_bool(const struct device_node *np, const char *propname) { return false; } static inline int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size) { return -ENOSYS; } static inline int of_property_read_u8_index(const struct device_node *np, const char *propname, u32 index, u8 *out_value) { return -ENOSYS; } static inline int of_property_read_u16_index(const struct device_node *np, const char *propname, u32 index, u16 *out_value) { return -ENOSYS; } static inline int of_property_read_u32_index(const struct device_node *np, const char *propname, u32 index, u32 *out_value) { return -ENOSYS; } static inline int of_property_read_u64_index(const struct device_node *np, const char *propname, u32 index, u64 *out_value) { return -ENOSYS; } static inline const void *of_get_property(const struct device_node *node, const char *name, int *lenp) { return NULL; } static inline struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) { return NULL; } static inline struct device_node *of_cpu_device_node_get(int cpu) { return NULL; } static inline int of_cpu_node_to_id(struct device_node *np) { return -ENODEV; } static inline struct device_node *of_get_next_cpu_node(struct device_node *prev) { return NULL; } static inline struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, int index) { return NULL; } static inline int of_n_addr_cells(struct device_node *np) { return 0; } static inline int of_n_size_cells(struct device_node *np) { return 0; } static inline int of_property_read_variable_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz_min, size_t sz_max) { return -ENOSYS; } static inline int of_property_read_variable_u16_array(const struct device_node *np, const char *propname, u16 *out_values, size_t sz_min, size_t sz_max) { return -ENOSYS; } static inline int of_property_read_variable_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz_min, size_t sz_max) { return -ENOSYS; } static inline int of_property_read_u64(const struct device_node *np, const char *propname, u64 *out_value) { return -ENOSYS; } static inline 
int of_property_read_variable_u64_array(const struct device_node *np, const char *propname, u64 *out_values, size_t sz_min, size_t sz_max) { return -ENOSYS; } static inline int of_property_read_string(const struct device_node *np, const char *propname, const char **out_string) { return -ENOSYS; } static inline int of_property_match_string(const struct device_node *np, const char *propname, const char *string) { return -ENOSYS; } static inline int of_property_read_string_helper(const struct device_node *np, const char *propname, const char **out_strs, size_t sz, int index) { return -ENOSYS; } static inline int __of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int cell_count, int index, struct of_phandle_args *out_args) { return -ENOSYS; } static inline int of_parse_phandle_with_args_map(const struct device_node *np, const char *list_name, const char *stem_name, int index, struct of_phandle_args *out_args) { return -ENOSYS; } static inline int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name) { return -ENOSYS; } static inline ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len) { return -ENODEV; } static inline int of_request_module(const struct device_node *np) { return -ENODEV; } static inline int of_phandle_iterator_init(struct of_phandle_iterator *it, const struct device_node *np, const char *list_name, const char *cells_name, int cell_count) { return -ENOSYS; } static inline int of_phandle_iterator_next(struct of_phandle_iterator *it) { return -ENOSYS; } static inline int of_phandle_iterator_args(struct of_phandle_iterator *it, uint32_t *args, int size) { return 0; } static inline int of_alias_get_id(struct device_node *np, const char *stem) { return -ENOSYS; } static inline int of_alias_get_highest_id(const char *stem) { return -ENOSYS; } static inline int of_machine_is_compatible(const char *compat) { return 0; } static inline int of_add_property(struct device_node *np, struct property *prop) { return 0; } static inline int of_remove_property(struct device_node *np, struct property *prop) { return 0; } static inline bool of_machine_compatible_match(const char *const *compats) { return false; } static inline bool of_machine_device_match(const struct of_device_id *matches) { return false; } static inline const void * of_machine_get_match_data(const struct of_device_id *matches) { return NULL; } static inline bool of_console_check(const struct device_node *dn, const char *name, int index) { return false; } static inline const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur, u32 *pu) { return NULL; } static inline const char *of_prop_next_string(const struct property *prop, const char *cur) { return NULL; } static inline int of_node_check_flag(struct device_node *n, unsigned long flag) { return 0; } static inline int of_node_test_and_set_flag(struct device_node *n, unsigned long flag) { return 0; } static inline void of_node_set_flag(struct device_node *n, unsigned long flag) { } static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) { } static inline int of_property_check_flag(const struct property *p, unsigned long flag) { return 0; } static inline void of_property_set_flag(struct property *p, unsigned long flag) { } static inline void of_property_clear_flag(struct property *p, unsigned long flag) { } static inline int of_map_id(const struct device_node *np, u32 id, const char *map_name, const char 
*map_mask_name, struct device_node **target, u32 *id_out) { return -EINVAL; } static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np) { return PHYS_ADDR_MAX; } static inline const void *of_device_get_match_data(const struct device *dev) { return NULL; } #define of_match_ptr(_ptr) NULL #define of_match_node(_matches, _node) NULL #endif /* CONFIG_OF */ /* Default string compare functions; allow arch asm/prom.h to override */ #if !defined(of_compat_cmp) #define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif #define for_each_property_of_node(dn, pp) \ for (pp = dn->properties; pp != NULL; pp = pp->next) #if defined(CONFIG_OF) && defined(CONFIG_NUMA) extern int of_node_to_nid(struct device_node *np); #else static inline int of_node_to_nid(struct device_node *device) { return NUMA_NO_NODE; } #endif #ifdef CONFIG_OF_NUMA extern int of_numa_init(void); #else static inline int of_numa_init(void) { return -ENOSYS; } #endif static inline struct device_node *of_find_matching_node( struct device_node *from, const struct of_device_id *matches) { return of_find_matching_node_and_match(from, matches, NULL); } static inline const char *of_node_get_device_type(const struct device_node *np) { return of_get_property(np, "device_type", NULL); } static inline bool of_node_is_type(const struct device_node *np, const char *type) { const char *match = of_node_get_device_type(np); return np && match && type && !strcmp(match, type); } /** * of_parse_phandle - Resolve a phandle property to a device_node pointer * @np: Pointer to device node holding phandle property * @phandle_name: Name of property holding a phandle value * @index: For properties holding a table of phandles, this is the index into * the table * * Return: The device_node pointer with refcount incremented. Use * of_node_put() on it when done. */ static inline struct device_node *of_parse_phandle(const struct device_node *np, const char *phandle_name, int index) { struct of_phandle_args args; if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0, index, &args)) return NULL; return args.np; } /** * of_parse_phandle_with_args() - Find a node pointed to by a phandle in a list * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @cells_name: property name that specifies phandles' arguments count * @index: index of a phandle to parse out * @out_args: optional pointer to output arguments structure (will be filled) * * This function is useful for parsing lists of phandles and their arguments. * Returns 0 on success and fills out_args; on error, it returns an appropriate * errno value. * * The caller is responsible for calling of_node_put() on the returned * out_args->np pointer.
* * Example:: * * phandle1: node1 { * #list-cells = <2>; * }; * * phandle2: node2 { * #list-cells = <1>; * }; * * node3 { * list = <&phandle1 1 2 &phandle2 3>; * }; * * To get a device_node of the ``node2`` node you may call this: * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args); */ static inline int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, struct of_phandle_args *out_args) { int cell_count = -1; /* If cells_name is NULL we assume a cell count of 0 */ if (!cells_name) cell_count = 0; return __of_parse_phandle_with_args(np, list_name, cells_name, cell_count, index, out_args); } /** * of_parse_phandle_with_fixed_args() - Find a node pointed to by a phandle in a list * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @cell_count: number of argument cells following the phandle * @index: index of a phandle to parse out * @out_args: optional pointer to output arguments structure (will be filled) * * This function is useful for parsing lists of phandles and their arguments. * Returns 0 on success and fills out_args; on error, it returns an appropriate * errno value. * * The caller is responsible for calling of_node_put() on the returned * out_args->np pointer. * * Example:: * * phandle1: node1 { * }; * * phandle2: node2 { * }; * * node3 { * list = <&phandle1 0 2 &phandle2 2 3>; * }; * * To get a device_node of the ``node2`` node you may call this: * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args); */ static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, const char *list_name, int cell_count, int index, struct of_phandle_args *out_args) { return __of_parse_phandle_with_args(np, list_name, NULL, cell_count, index, out_args); } /** * of_parse_phandle_with_optional_args() - Find a node pointed to by a phandle in a list * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @cells_name: property name that specifies phandles' arguments count * @index: index of a phandle to parse out * @out_args: optional pointer to output arguments structure (will be filled) * * Same as of_parse_phandle_with_args() except that if the cells_name property * is not found, cell_count of 0 is assumed. * * This is useful if you have a phandle which didn't have arguments * before and thus doesn't have a '#*-cells' property, but is now migrated to * having arguments while retaining backwards compatibility. */ static inline int of_parse_phandle_with_optional_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, struct of_phandle_args *out_args) { return __of_parse_phandle_with_args(np, list_name, cells_name, 0, index, out_args); } /** * of_phandle_args_equal() - Compare two of_phandle_args * @a1: First of_phandle_args to compare * @a2: Second of_phandle_args to compare * * Return: True if a1 and a2 are the same (same node pointer, same phandle * args), false otherwise. */ static inline bool of_phandle_args_equal(const struct of_phandle_args *a1, const struct of_phandle_args *a2) { return a1->np == a2->np && a1->args_count == a2->args_count && !memcmp(a1->args, a2->args, sizeof(a1->args[0]) * a1->args_count); } /** * of_property_count_u8_elems - Count the number of u8 elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched.
* * Search for a property in a device node and count the number of u8 elements * in it. * * Return: The number of elements on success, -EINVAL if the property does * not exist or its length does not match a multiple of u8, and -ENODATA if the * property does not have a value. */ static inline int of_property_count_u8_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u8)); } /** * of_property_count_u16_elems - Count the number of u16 elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u16 elements * in it. * * Return: The number of elements on success, -EINVAL if the property does * not exist or its length does not match a multiple of u16, and -ENODATA if the * property does not have a value. */ static inline int of_property_count_u16_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u16)); } /** * of_property_count_u32_elems - Count the number of u32 elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u32 elements * in it. * * Return: The number of elements on success, -EINVAL if the property does * not exist or its length does not match a multiple of u32, and -ENODATA if the * property does not have a value. */ static inline int of_property_count_u32_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u32)); } /** * of_property_count_u64_elems - Count the number of u64 elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u64 elements * in it. * * Return: The number of elements on success, -EINVAL if the property does * not exist or its length does not match a multiple of u64, and -ENODATA if the * property does not have a value. */ static inline int of_property_count_u64_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u64)); } /** * of_property_read_string_array() - Read an array of strings from a multiple * strings property. * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_strs: output array of string pointers. * @sz: number of array elements to read. * * Search for a property in a device tree node and retrieve a list of * null-terminated string values (pointer to data, not a copy) in that property. * * Return: If @out_strs is NULL, the number of strings in the property is returned. */ static inline int of_property_read_string_array(const struct device_node *np, const char *propname, const char **out_strs, size_t sz) { return of_property_read_string_helper(np, propname, out_strs, sz, 0); } /** * of_property_count_strings() - Find and return the number of strings from a * multiple strings property. * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device tree node and retrieve the number of * null-terminated strings contained in it.
* * Return: The number of strings on success, -EINVAL if the property does not * exist, -ENODATA if property does not have a value, and -EILSEQ if the string * is not null-terminated within the length of the property data. */ static inline int of_property_count_strings(const struct device_node *np, const char *propname) { return of_property_read_string_helper(np, propname, NULL, 0, 0); } /** * of_property_read_string_index() - Find and read a string from a multiple * strings property. * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @index: index of the string in the list of strings * @output: pointer to null-terminated return string, modified only if * return value is 0. * * Search for a property in a device tree node and retrieve a null-terminated * string value (pointer to data, not a copy) in the list of strings * contained in that property. * * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if * property does not have a value, and -EILSEQ if the string is not * null-terminated within the length of the property data. * * The @output pointer is modified only if a valid string can be decoded. */ static inline int of_property_read_string_index(const struct device_node *np, const char *propname, int index, const char **output) { int rc = of_property_read_string_helper(np, propname, output, 1, index); return rc < 0 ? rc : 0; } /** * of_property_present - Test if a property is present in a node * @np: device node to search for the property. * @propname: name of the property to be searched. * * Test for a property present in a device node. * * Return: true if the property exists, false otherwise. */ static inline bool of_property_present(const struct device_node *np, const char *propname) { struct property *prop = of_find_property(np, propname, NULL); return prop ? true : false; } /** * of_property_read_u8_array - Find and read an array of u8 from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 8-bit value(s) from * it. * * The dts entry for the array should look like: * ``property = /bits/ 8 <0x50 0x60 0x70>;`` * * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u8 value can be decoded. */ static inline int of_property_read_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz) { int ret = of_property_read_variable_u8_array(np, propname, out_values, sz, 0); if (ret >= 0) return 0; else return ret; } /** * of_property_read_u16_array - Find and read an array of u16 from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 16-bit value(s) from * it. * * The dts entry for the array should look like: * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;`` * * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough.
* * The out_values is modified only if a valid u16 value can be decoded. */ static inline int of_property_read_u16_array(const struct device_node *np, const char *propname, u16 *out_values, size_t sz) { int ret = of_property_read_variable_u16_array(np, propname, out_values, sz, 0); if (ret >= 0) return 0; else return ret; } /** * of_property_read_u32_array - Find and read an array of 32 bit integers * from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 32-bit value(s) from * it. * * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u32 value can be decoded. */ static inline int of_property_read_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz) { int ret = of_property_read_variable_u32_array(np, propname, out_values, sz, 0); if (ret >= 0) return 0; else return ret; } /** * of_property_read_u64_array - Find and read an array of 64 bit integers * from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 64-bit value(s) from * it. * * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u64 value can be decoded. 
*/ static inline int of_property_read_u64_array(const struct device_node *np, const char *propname, u64 *out_values, size_t sz) { int ret = of_property_read_variable_u64_array(np, propname, out_values, sz, 0); if (ret >= 0) return 0; else return ret; } static inline int of_property_read_u8(const struct device_node *np, const char *propname, u8 *out_value) { return of_property_read_u8_array(np, propname, out_value, 1); } static inline int of_property_read_u16(const struct device_node *np, const char *propname, u16 *out_value) { return of_property_read_u16_array(np, propname, out_value, 1); } static inline int of_property_read_u32(const struct device_node *np, const char *propname, u32 *out_value) { return of_property_read_u32_array(np, propname, out_value, 1); } static inline int of_property_read_s32(const struct device_node *np, const char *propname, s32 *out_value) { return of_property_read_u32(np, propname, (u32*) out_value); } #define of_for_each_phandle(it, err, np, ln, cn, cc) \ for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \ err = of_phandle_iterator_next(it); \ err == 0; \ err = of_phandle_iterator_next(it)) #define of_property_for_each_u32(np, propname, u) \ for (struct {const struct property *prop; const __be32 *item; } _it = \ {of_find_property(np, propname, NULL), \ of_prop_next_u32(_it.prop, NULL, &u)}; \ _it.item; \ _it.item = of_prop_next_u32(_it.prop, _it.item, &u)) #define of_property_for_each_string(np, propname, prop, s) \ for (prop = of_find_property(np, propname, NULL), \ s = of_prop_next_string(prop, NULL); \ s; \ s = of_prop_next_string(prop, s)) #define for_each_node_by_name(dn, name) \ for (dn = of_find_node_by_name(NULL, name); dn; \ dn = of_find_node_by_name(dn, name)) #define for_each_node_by_type(dn, type) \ for (dn = of_find_node_by_type(NULL, type); dn; \ dn = of_find_node_by_type(dn, type)) #define for_each_compatible_node(dn, type, compatible) \ for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ dn = of_find_compatible_node(dn, type, compatible)) #define for_each_matching_node(dn, matches) \ for (dn = of_find_matching_node(NULL, matches); dn; \ dn = of_find_matching_node(dn, matches)) #define for_each_matching_node_and_match(dn, matches, match) \ for (dn = of_find_matching_node_and_match(NULL, matches, match); \ dn; dn = of_find_matching_node_and_match(dn, matches, match)) #define for_each_child_of_node(parent, child) \ for (child = of_get_next_child(parent, NULL); child != NULL; \ child = of_get_next_child(parent, child)) #define for_each_child_of_node_scoped(parent, child) \ for (struct device_node *child __free(device_node) = \ of_get_next_child(parent, NULL); \ child != NULL; \ child = of_get_next_child(parent, child)) #define for_each_child_of_node_with_prefix(parent, child, prefix) \ for (struct device_node *child __free(device_node) = \ of_get_next_child_with_prefix(parent, NULL, prefix); \ child != NULL; \ child = of_get_next_child_with_prefix(parent, child, prefix)) #define for_each_available_child_of_node(parent, child) \ for (child = of_get_next_available_child(parent, NULL); child != NULL; \ child = of_get_next_available_child(parent, child)) #define for_each_reserved_child_of_node(parent, child) \ for (child = of_get_next_reserved_child(parent, NULL); child != NULL; \ child = of_get_next_reserved_child(parent, child)) #define for_each_available_child_of_node_scoped(parent, child) \ for (struct device_node *child __free(device_node) = \ of_get_next_available_child(parent, NULL); \ child != NULL; \ child = 
of_get_next_available_child(parent, child)) #define for_each_of_cpu_node(cpu) \ for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \ cpu = of_get_next_cpu_node(cpu)) #define for_each_node_with_property(dn, prop_name) \ for (dn = of_find_node_with_property(NULL, prop_name); dn; \ dn = of_find_node_with_property(dn, prop_name)) static inline int of_get_child_count(const struct device_node *np) { struct device_node *child; int num = 0; for_each_child_of_node(np, child) num++; return num; } static inline int of_get_available_child_count(const struct device_node *np) { struct device_node *child; int num = 0; for_each_available_child_of_node(np, child) num++; return num; } #define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __attribute__((unused)) \ = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #if defined(CONFIG_OF) && !defined(MODULE) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __used __section("__" #table "_of_table") \ __aligned(__alignof__(struct of_device_id)) \ = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else #define _OF_DECLARE(table, name, compat, fn, fn_type) \ _OF_DECLARE_STUB(table, name, compat, fn, fn_type) #endif typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); typedef int (*of_init_fn_1_ret)(struct device_node *); typedef void (*of_init_fn_1)(struct device_node *); #define OF_DECLARE_1(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_1) #define OF_DECLARE_1_RET(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) #define OF_DECLARE_2(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_2) /** * struct of_changeset_entry - Holds a changeset entry * * @node: list_head for the log list * @action: notifier action * @np: pointer to the device node affected * @prop: pointer to the property affected * @old_prop: holds a pointer to the original property * * Every modification of the device tree during a changeset * is held in a list of of_changeset_entry structures. * That way we can recover from a partial application, or we can * revert the changeset. */ struct of_changeset_entry { struct list_head node; unsigned long action; struct device_node *np; struct property *prop; struct property *old_prop; }; /** * struct of_changeset - changeset tracker structure * * @entries: list_head for the changeset entries * * Changesets are a convenient way to apply bulk changes to the * live tree. In case of an error, changes are rolled back. * Changesets live on after initial application, and if not * destroyed after use, they can be reverted in one single call.
*/ struct of_changeset { struct list_head entries; }; enum of_reconfig_change { OF_RECONFIG_NO_CHANGE = 0, OF_RECONFIG_CHANGE_ADD, OF_RECONFIG_CHANGE_REMOVE, }; struct notifier_block; #ifdef CONFIG_OF_DYNAMIC extern int of_reconfig_notifier_register(struct notifier_block *); extern int of_reconfig_notifier_unregister(struct notifier_block *); extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); extern int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *arg); extern void of_changeset_init(struct of_changeset *ocs); extern void of_changeset_destroy(struct of_changeset *ocs); extern int of_changeset_apply(struct of_changeset *ocs); extern int of_changeset_revert(struct of_changeset *ocs); extern int of_changeset_action(struct of_changeset *ocs, unsigned long action, struct device_node *np, struct property *prop); static inline int of_changeset_attach_node(struct of_changeset *ocs, struct device_node *np) { return of_changeset_action(ocs, OF_RECONFIG_ATTACH_NODE, np, NULL); } static inline int of_changeset_detach_node(struct of_changeset *ocs, struct device_node *np) { return of_changeset_action(ocs, OF_RECONFIG_DETACH_NODE, np, NULL); } static inline int of_changeset_add_property(struct of_changeset *ocs, struct device_node *np, struct property *prop) { return of_changeset_action(ocs, OF_RECONFIG_ADD_PROPERTY, np, prop); } static inline int of_changeset_remove_property(struct of_changeset *ocs, struct device_node *np, struct property *prop) { return of_changeset_action(ocs, OF_RECONFIG_REMOVE_PROPERTY, np, prop); } static inline int of_changeset_update_property(struct of_changeset *ocs, struct device_node *np, struct property *prop) { return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); } struct device_node *of_changeset_create_node(struct of_changeset *ocs, struct device_node *parent, const char *full_name); int of_changeset_add_prop_string(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const char *str); int of_changeset_add_prop_string_array(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const char * const *str_array, size_t sz); int of_changeset_add_prop_u32_array(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const u32 *array, size_t sz); static inline int of_changeset_add_prop_u32(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const u32 val) { return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1); } int of_changeset_update_prop_string(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const char *str); int of_changeset_add_prop_bool(struct of_changeset *ocs, struct device_node *np, const char *prop_name); #else /* CONFIG_OF_DYNAMIC */ static inline int of_reconfig_notifier_register(struct notifier_block *nb) { return -EINVAL; } static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) { return -EINVAL; } static inline int of_reconfig_notify(unsigned long action, struct of_reconfig_data *arg) { return -EINVAL; } static inline int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *arg) { return -EINVAL; } #endif /* CONFIG_OF_DYNAMIC */ /** * of_device_is_system_power_controller - Tells if a system-power-controller property is found for the device_node * @np: Pointer to the given device_node * * Return: true if present, false otherwise */ static inline bool of_device_is_system_power_controller(const struct device_node *np) { return of_property_read_bool(np,
"system-power-controller"); } /** * of_have_populated_dt() - Has DT been populated by bootloader * * Return: True if a DTB has been populated by the bootloader and it isn't the * empty builtin one. False otherwise. */ static inline bool of_have_populated_dt(void) { #ifdef CONFIG_OF return of_property_present(of_root, "compatible"); #else return false; #endif } /* * Overlay support */ enum of_overlay_notify_action { OF_OVERLAY_INIT = 0, /* kzalloc() of ovcs sets this value */ OF_OVERLAY_PRE_APPLY, OF_OVERLAY_POST_APPLY, OF_OVERLAY_PRE_REMOVE, OF_OVERLAY_POST_REMOVE, }; static inline const char *of_overlay_action_name(enum of_overlay_notify_action action) { static const char *const of_overlay_action_name[] = { "init", "pre-apply", "post-apply", "pre-remove", "post-remove", }; return of_overlay_action_name[action]; } struct of_overlay_notify_data { struct device_node *overlay; struct device_node *target; }; #ifdef CONFIG_OF_OVERLAY int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, int *ovcs_id, const struct device_node *target_base); int of_overlay_remove(int *ovcs_id); int of_overlay_remove_all(void); int of_overlay_notifier_register(struct notifier_block *nb); int of_overlay_notifier_unregister(struct notifier_block *nb); #else static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, int *ovcs_id, const struct device_node *target_base) { return -ENOTSUPP; } static inline int of_overlay_remove(int *ovcs_id) { return -ENOTSUPP; } static inline int of_overlay_remove_all(void) { return -ENOTSUPP; } static inline int of_overlay_notifier_register(struct notifier_block *nb) { return 0; } static inline int of_overlay_notifier_unregister(struct notifier_block *nb) { return 0; } #endif #endif /* _LINUX_OF_H */
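And a sketch of the CONFIG_OF_DYNAMIC changeset flow declared above (the "example-cells" property name is an assumption; a real caller would keep the changeset around if it intends to revert it later with of_changeset_revert()):

/* Batch a runtime property addition: entries accumulate in the changeset and
 * are applied in one go by of_changeset_apply(); on failure the partially
 * built set is disposed of with of_changeset_destroy(). */
static int example_add_prop(struct of_changeset *ocs, struct device_node *np)
{
	int ret;

	of_changeset_init(ocs);

	ret = of_changeset_add_prop_u32(ocs, np, "example-cells", 1);
	if (!ret)
		ret = of_changeset_apply(ocs);
	if (ret)
		of_changeset_destroy(ocs);

	return ret;
}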
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AirSpy SDR driver
 *
 * Copyright (C) 2014 Antti Palosaari <crope@iki.fi>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

/* AirSpy USB API commands (from AirSpy Library) */
enum {
	CMD_INVALID			= 0x00,
	CMD_RECEIVER_MODE		= 0x01,
	CMD_SI5351C_WRITE		= 0x02,
	CMD_SI5351C_READ		= 0x03,
	CMD_R820T_WRITE			= 0x04,
	CMD_R820T_READ			= 0x05,
	CMD_SPIFLASH_ERASE		= 0x06,
	CMD_SPIFLASH_WRITE		= 0x07,
	CMD_SPIFLASH_READ		= 0x08,
	CMD_BOARD_ID_READ		= 0x09,
	CMD_VERSION_STRING_READ		= 0x0a,
	CMD_BOARD_PARTID_SERIALNO_READ	= 0x0b,
	CMD_SET_SAMPLE_RATE		= 0x0c,
	CMD_SET_FREQ			= 0x0d,
	CMD_SET_LNA_GAIN		= 0x0e,
	CMD_SET_MIXER_GAIN		= 0x0f,
	CMD_SET_VGA_GAIN		= 0x10,
	CMD_SET_LNA_AGC			= 0x11,
	CMD_SET_MIXER_AGC		= 0x12,
	CMD_SET_PACKING			= 0x13,
};

/*
 * bEndpointAddress   0x81  EP 1 IN
 * Transfer Type      Bulk
 * wMaxPacketSize     0x0200  1x 512 bytes
 */
#define MAX_BULK_BUFS		(6)
#define BULK_BUFFER_SIZE	(128 * 512)

static const struct v4l2_frequency_band bands[] = {
	{
		.tuner = 0,
		.type = V4L2_TUNER_ADC,
		.index = 0,
		.capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
		.rangelow  = 20000000,
		.rangehigh = 20000000,
	},
};

static const struct v4l2_frequency_band bands_rf[] = {
	{
		.tuner = 1,
		.type = V4L2_TUNER_RF,
		.index = 0,
		.capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
		.rangelow  =   24000000,
		.rangehigh = 1750000000,
	},
};

/* stream formats */
struct airspy_format {
	u32	pixelformat;
	u32	buffersize;
};

/* format descriptions for capture and preview */
static struct airspy_format formats[] = {
	{
		.pixelformat	= V4L2_SDR_FMT_RU12LE,
		.buffersize	= BULK_BUFFER_SIZE,
	},
};

static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);

/* intermediate buffers with raw data from the USB device */
struct airspy_frame_buf {
	/* common v4l buffer stuff -- must be first */
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

struct airspy {
#define POWER_ON		1
#define USB_STATE_URB_BUF	2
	unsigned long flags;

	struct device *dev;
	struct usb_device *udev;
	struct video_device vdev;
	struct v4l2_device v4l2_dev;

	/* videobuf2 queue and queued buffers list */
	struct vb2_queue vb_queue;
	struct list_head queued_bufs;
	spinlock_t queued_bufs_lock;	/* Protects queued_bufs */
	unsigned sequence;		/* Buffer sequence counter */
	unsigned int vb_full;		/* vb is full and packets dropped */

	/* Note: if taking both locks, v4l2_lock must always be locked first! */
	struct mutex v4l2_lock;		/* Protects everything else */
	struct mutex vb_queue_lock;	/* Protects vb_queue and capt_file */

	struct urb     *urb_list[MAX_BULK_BUFS];
	int            buf_num;
	unsigned long  buf_size;
	u8             *buf_list[MAX_BULK_BUFS];
	dma_addr_t     dma_addr[MAX_BULK_BUFS];
	int            urbs_initialized;
	int            urbs_submitted;

	/* USB control message buffer */
	#define BUF_SIZE 128
	u8 *buf;

	/* Current configuration */
	unsigned int f_adc;
	unsigned int f_rf;
	u32 pixelformat;
	u32 buffersize;

	/* Controls */
	struct v4l2_ctrl_handler hdl;
	struct v4l2_ctrl *lna_gain_auto;
	struct v4l2_ctrl *lna_gain;
	struct v4l2_ctrl *mixer_gain_auto;
	struct v4l2_ctrl *mixer_gain;
	struct v4l2_ctrl *if_gain;

	/* Sample rate calc */
	unsigned long jiffies_next;
	unsigned int sample;
	unsigned int sample_measured;
};

#define airspy_dbg_usb_control_msg(_dev, _r, _t, _v, _i, _b, _l) { \
	char *_direction; \
	if (_t & USB_DIR_IN) \
		_direction = "<<<"; \
	else \
		_direction = ">>>"; \
	dev_dbg(_dev, "%02x %02x %02x %02x %02x %02x %02x %02x %s %*ph\n", \
			_t, _r, _v & 0xff, _v >> 8, _i & 0xff, _i >> 8, \
			_l & 0xff, _l >> 8, _direction, _l, _b); \
}

/* execute firmware command */
static int airspy_ctrl_msg(struct airspy *s, u8 request, u16 value, u16 index,
		u8 *data, u16 size)
{
	int ret;
	unsigned int pipe;
	u8 requesttype;

	switch (request) {
	case CMD_RECEIVER_MODE:
	case CMD_SET_FREQ:
		pipe = usb_sndctrlpipe(s->udev, 0);
		requesttype = (USB_TYPE_VENDOR | USB_DIR_OUT);
		break;
	case CMD_BOARD_ID_READ:
	case CMD_VERSION_STRING_READ:
	case CMD_BOARD_PARTID_SERIALNO_READ:
	case CMD_SET_LNA_GAIN:
	case CMD_SET_MIXER_GAIN:
	case CMD_SET_VGA_GAIN:
	case CMD_SET_LNA_AGC:
	case CMD_SET_MIXER_AGC:
		pipe = usb_rcvctrlpipe(s->udev, 0);
		requesttype = (USB_TYPE_VENDOR | USB_DIR_IN);
		break;
	default:
		dev_err(s->dev, "Unknown command %02x\n", request);
		ret = -EINVAL;
		goto err;
	}

	/* write request */
	if (!(requesttype & USB_DIR_IN))
		memcpy(s->buf, data, size);

	ret = usb_control_msg(s->udev, pipe, request, requesttype, value,
			index, s->buf, size, 1000);
	airspy_dbg_usb_control_msg(s->dev, request, requesttype, value,
			index, s->buf, size);
	if (ret < 0) {
		dev_err(s->dev, "usb_control_msg() failed %d request %02x\n",
				ret, request);
		goto err;
	}

	/* read request */
	if (requesttype & USB_DIR_IN)
		memcpy(data, s->buf, size);

	return 0;
err:
	return ret;
}

/* Private functions */
static struct airspy_frame_buf *airspy_get_next_fill_buf(struct airspy *s)
{
	unsigned long flags;
	struct airspy_frame_buf *buf = NULL;

	spin_lock_irqsave(&s->queued_bufs_lock, flags);
	if (list_empty(&s->queued_bufs))
		goto leave;

	buf = list_entry(s->queued_bufs.next,
			struct airspy_frame_buf, list);
	list_del(&buf->list);
leave:
	spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
	return buf;
}

static unsigned int airspy_convert_stream(struct airspy *s,
		void *dst, void *src, unsigned int src_len)
{
	unsigned int dst_len;

	if (s->pixelformat == V4L2_SDR_FMT_RU12LE) {
		memcpy(dst, src, src_len);
		dst_len = src_len;
	} else {
		dst_len = 0;
	}

	/* calculate sample rate and output it in 10 second intervals */
	if (unlikely(time_is_before_jiffies(s->jiffies_next))) {
#define MSECS 10000UL
		unsigned int msecs = jiffies_to_msecs(jiffies -
				s->jiffies_next + msecs_to_jiffies(MSECS));
		unsigned int samples = s->sample - s->sample_measured;

		s->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
		s->sample_measured = s->sample;
		dev_dbg(s->dev, "slen=%u samples=%u msecs=%u sample rate=%lu\n",
				src_len, samples, msecs,
				samples * 1000UL / msecs);
	}

	/* total number of samples */
	s->sample += src_len / 2;

	return dst_len;
}

/*
 * This gets called for the bulk stream pipe. This is done in interrupt
 * time, so it has to be fast, not crash, and not stall. Neat.
 */
static void airspy_urb_complete(struct urb *urb)
{
	struct airspy *s = urb->context;
	struct airspy_frame_buf *fbuf;

	dev_dbg_ratelimited(s->dev, "status=%d length=%d/%d errors=%d\n",
			urb->status, urb->actual_length,
			urb->transfer_buffer_length, urb->error_count);

	switch (urb->status) {
	case 0:			/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err_ratelimited(s->dev, "URB failed %d\n", urb->status);
		break;
	}

	if (likely(urb->actual_length > 0)) {
		void *ptr;
		unsigned int len;

		/* get free framebuffer */
		fbuf = airspy_get_next_fill_buf(s);
		if (unlikely(fbuf == NULL)) {
			s->vb_full++;
			dev_notice_ratelimited(s->dev,
					"video buffer is full, %d packets dropped\n",
					s->vb_full);
			goto skip;
		}

		/* fill framebuffer */
		ptr = vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0);
		len = airspy_convert_stream(s, ptr, urb->transfer_buffer,
				urb->actual_length);
		vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, len);
		fbuf->vb.vb2_buf.timestamp = ktime_get_ns();
		fbuf->vb.sequence = s->sequence++;
		vb2_buffer_done(&fbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	}
skip:
	usb_submit_urb(urb, GFP_ATOMIC);
}

static int airspy_kill_urbs(struct airspy *s)
{
	int i;

	for (i = s->urbs_submitted - 1; i >= 0; i--) {
		dev_dbg(s->dev, "kill urb=%d\n", i);
		/* stop the URB */
		usb_kill_urb(s->urb_list[i]);
	}
	s->urbs_submitted = 0;

	return 0;
}

static int airspy_submit_urbs(struct airspy *s)
{
	int i, ret;

	for (i = 0; i < s->urbs_initialized; i++) {
		dev_dbg(s->dev, "submit urb=%d\n", i);
		ret = usb_submit_urb(s->urb_list[i], GFP_ATOMIC);
		if (ret) {
			dev_err(s->dev, "Could not submit URB no. %d - get them all back\n",
					i);
			airspy_kill_urbs(s);
			return ret;
		}
		s->urbs_submitted++;
	}

	return 0;
}

static int airspy_free_stream_bufs(struct airspy *s)
{
	if (test_bit(USB_STATE_URB_BUF, &s->flags)) {
		while (s->buf_num) {
			s->buf_num--;
			dev_dbg(s->dev, "free buf=%d\n", s->buf_num);
			usb_free_coherent(s->udev, s->buf_size,
					  s->buf_list[s->buf_num],
					  s->dma_addr[s->buf_num]);
		}
	}
	clear_bit(USB_STATE_URB_BUF, &s->flags);

	return 0;
}

static int airspy_alloc_stream_bufs(struct airspy *s)
{
	s->buf_num = 0;
	s->buf_size = BULK_BUFFER_SIZE;

	dev_dbg(s->dev, "all in all I will use %u bytes for streaming\n",
			MAX_BULK_BUFS * BULK_BUFFER_SIZE);

	for (s->buf_num = 0; s->buf_num < MAX_BULK_BUFS; s->buf_num++) {
		s->buf_list[s->buf_num] = usb_alloc_coherent(s->udev,
				BULK_BUFFER_SIZE, GFP_ATOMIC,
				&s->dma_addr[s->buf_num]);
		if (!s->buf_list[s->buf_num]) {
			dev_dbg(s->dev, "alloc buf=%d failed\n", s->buf_num);
			airspy_free_stream_bufs(s);
			return -ENOMEM;
		}

		dev_dbg(s->dev, "alloc buf=%d %p (dma %llu)\n", s->buf_num,
				s->buf_list[s->buf_num],
				(long long)s->dma_addr[s->buf_num]);
		set_bit(USB_STATE_URB_BUF, &s->flags);
	}

	return 0;
}

static int airspy_free_urbs(struct airspy *s)
{
	int i;

	airspy_kill_urbs(s);

	for (i = s->urbs_initialized - 1; i >= 0; i--) {
		if (s->urb_list[i]) {
			dev_dbg(s->dev, "free urb=%d\n", i);
			/* free the URBs */
			usb_free_urb(s->urb_list[i]);
		}
	}
	s->urbs_initialized = 0;

	return 0;
}

static int airspy_alloc_urbs(struct airspy *s)
{
	int i, j;

	/* allocate the URBs */
	for (i = 0; i < MAX_BULK_BUFS; i++) {
		dev_dbg(s->dev, "alloc urb=%d\n", i);
		s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
		if (!s->urb_list[i]) {
			for (j = 0; j < i; j++) {
				usb_free_urb(s->urb_list[j]);
				s->urb_list[j] = NULL;
			}
			s->urbs_initialized = 0;
			return -ENOMEM;
		}
		usb_fill_bulk_urb(s->urb_list[i],
				s->udev,
				usb_rcvbulkpipe(s->udev, 0x81),
				s->buf_list[i],
				BULK_BUFFER_SIZE,
				airspy_urb_complete, s);

		s->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		s->urb_list[i]->transfer_dma = s->dma_addr[i];
		s->urbs_initialized++;
	}

	return 0;
}

/* Must be called with vb_queue_lock held */
static void airspy_cleanup_queued_bufs(struct airspy *s)
{
	unsigned long flags;

	dev_dbg(s->dev, "\n");

	spin_lock_irqsave(&s->queued_bufs_lock, flags);
	while (!list_empty(&s->queued_bufs)) {
		struct airspy_frame_buf *buf;

		buf = list_entry(s->queued_bufs.next,
				struct airspy_frame_buf, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
}

/* The user yanked out the cable... */
static void airspy_disconnect(struct usb_interface *intf)
{
	struct v4l2_device *v = usb_get_intfdata(intf);
	struct airspy *s = container_of(v, struct airspy, v4l2_dev);

	dev_dbg(s->dev, "\n");

	mutex_lock(&s->vb_queue_lock);
	mutex_lock(&s->v4l2_lock);
	/* No need to keep the urbs around after disconnection */
	s->udev = NULL;
	v4l2_device_disconnect(&s->v4l2_dev);
	video_unregister_device(&s->vdev);
	mutex_unlock(&s->v4l2_lock);
	mutex_unlock(&s->vb_queue_lock);

	v4l2_device_put(&s->v4l2_dev);
}

/* Videobuf2 operations */
static int airspy_queue_setup(struct vb2_queue *vq,
		unsigned int *nbuffers,
		unsigned int *nplanes, unsigned int sizes[],
		struct device *alloc_devs[])
{
	struct airspy *s = vb2_get_drv_priv(vq);
	unsigned int q_num_bufs = vb2_get_num_buffers(vq);

	dev_dbg(s->dev, "nbuffers=%d\n", *nbuffers);

	/* Need at least 8 buffers */
	if (q_num_bufs + *nbuffers < 8)
		*nbuffers = 8 - q_num_bufs;
	*nplanes = 1;
	sizes[0] = PAGE_ALIGN(s->buffersize);

	dev_dbg(s->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]);
	return 0;
}

static void airspy_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct airspy *s = vb2_get_drv_priv(vb->vb2_queue);
	struct airspy_frame_buf *buf =
			container_of(vbuf, struct airspy_frame_buf, vb);
	unsigned long flags;

	/* Check the device has not disconnected between prep and queuing */
	if (unlikely(!s->udev)) {
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}

	spin_lock_irqsave(&s->queued_bufs_lock, flags);
	list_add_tail(&buf->list, &s->queued_bufs);
	spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
}

static int airspy_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct airspy *s = vb2_get_drv_priv(vq);
	int ret;

	dev_dbg(s->dev, "\n");

	if (!s->udev)
		return -ENODEV;

	mutex_lock(&s->v4l2_lock);

	s->sequence = 0;

	set_bit(POWER_ON, &s->flags);

	ret = airspy_alloc_stream_bufs(s);
	if (ret)
		goto err_clear_bit;

	ret = airspy_alloc_urbs(s);
	if (ret)
		goto err_free_stream_bufs;

	ret = airspy_submit_urbs(s);
	if (ret)
		goto err_free_urbs;

	/* start hardware streaming */
	ret = airspy_ctrl_msg(s, CMD_RECEIVER_MODE, 1, 0, NULL, 0);
	if (ret)
		goto err_kill_urbs;

	goto exit_mutex_unlock;

err_kill_urbs:
	airspy_kill_urbs(s);
err_free_urbs:
	airspy_free_urbs(s);
err_free_stream_bufs:
	airspy_free_stream_bufs(s);
err_clear_bit:
	clear_bit(POWER_ON, &s->flags);

	/* return all queued buffers to vb2 */
	{
		struct airspy_frame_buf *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &s->queued_bufs, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}

exit_mutex_unlock:
	mutex_unlock(&s->v4l2_lock);

	return ret;
}

static void airspy_stop_streaming(struct vb2_queue *vq)
{
	struct airspy *s = vb2_get_drv_priv(vq);

	dev_dbg(s->dev, "\n");

	mutex_lock(&s->v4l2_lock);

	/* stop hardware streaming */
	airspy_ctrl_msg(s, CMD_RECEIVER_MODE, 0, 0, NULL, 0);

	airspy_kill_urbs(s);
	airspy_free_urbs(s);
	airspy_free_stream_bufs(s);

	airspy_cleanup_queued_bufs(s);

	clear_bit(POWER_ON, &s->flags);

	mutex_unlock(&s->v4l2_lock);
}

static const struct vb2_ops airspy_vb2_ops = {
	.queue_setup            = airspy_queue_setup,
	.buf_queue              = airspy_buf_queue,
	.start_streaming        = airspy_start_streaming,
	.stop_streaming         = airspy_stop_streaming,
};

static int airspy_querycap(struct file *file, void *fh,
		struct v4l2_capability *cap)
{
	struct airspy *s = video_drvdata(file);

	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, s->vdev.name, sizeof(cap->card));
	usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info));

	return 0;
}

static int airspy_enum_fmt_sdr_cap(struct file *file, void *priv,
		struct v4l2_fmtdesc *f)
{
	if (f->index >= NUM_FORMATS)
		return -EINVAL;

	f->pixelformat = formats[f->index].pixelformat;

	return 0;
}

static int airspy_g_fmt_sdr_cap(struct file *file, void *priv,
		struct v4l2_format *f)
{
	struct airspy *s = video_drvdata(file);

	f->fmt.sdr.pixelformat = s->pixelformat;
	f->fmt.sdr.buffersize = s->buffersize;

	return 0;
}

static int airspy_s_fmt_sdr_cap(struct file *file, void *priv,
		struct v4l2_format *f)
{
	struct airspy *s = video_drvdata(file);
	struct vb2_queue *q = &s->vb_queue;
	int i;

	if (vb2_is_busy(q))
		return -EBUSY;

	for (i = 0; i < NUM_FORMATS; i++) {
		if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
			s->pixelformat = formats[i].pixelformat;
			s->buffersize = formats[i].buffersize;
			f->fmt.sdr.buffersize = formats[i].buffersize;
			return 0;
		}
	}

	s->pixelformat = formats[0].pixelformat;
	s->buffersize = formats[0].buffersize;
	f->fmt.sdr.pixelformat = formats[0].pixelformat;
	f->fmt.sdr.buffersize = formats[0].buffersize;

	return 0;
}

static int airspy_try_fmt_sdr_cap(struct file *file, void *priv,
		struct v4l2_format *f)
{
	int i;

	for (i = 0; i < NUM_FORMATS; i++) {
		if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
			f->fmt.sdr.buffersize = formats[i].buffersize;
			return 0;
		}
	}

	f->fmt.sdr.pixelformat = formats[0].pixelformat;
	f->fmt.sdr.buffersize = formats[0].buffersize;

	return 0;
}

static int airspy_s_tuner(struct file *file, void *priv,
		const struct v4l2_tuner *v)
{
	int ret;

	if (v->index == 0)
		ret = 0;
	else if (v->index == 1)
		ret = 0;
	else
		ret = -EINVAL;

	return ret;
}

static int airspy_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
{
	int ret;

	if (v->index == 0) {
		strscpy(v->name, "AirSpy ADC", sizeof(v->name));
		v->type = V4L2_TUNER_ADC;
		v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
		v->rangelow  = bands[0].rangelow;
		v->rangehigh = bands[0].rangehigh;
		ret = 0;
	} else if (v->index == 1) {
		strscpy(v->name, "AirSpy RF", sizeof(v->name));
		v->type = V4L2_TUNER_RF;
		v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
		v->rangelow  = bands_rf[0].rangelow;
		v->rangehigh = bands_rf[0].rangehigh;
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int airspy_g_frequency(struct file *file, void *priv,
		struct v4l2_frequency *f)
{
	struct airspy *s = video_drvdata(file);
	int ret;

	if (f->tuner == 0) {
		f->type = V4L2_TUNER_ADC;
		f->frequency = s->f_adc;
		dev_dbg(s->dev, "ADC frequency=%u Hz\n", s->f_adc);
		ret = 0;
	} else if (f->tuner == 1) {
		f->type = V4L2_TUNER_RF;
		f->frequency = s->f_rf;
		dev_dbg(s->dev, "RF frequency=%u Hz\n", s->f_rf);
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int airspy_s_frequency(struct file *file, void *priv,
		const struct v4l2_frequency *f)
{
	struct airspy *s = video_drvdata(file);
	int ret;
	u8 buf[4];

	if (f->tuner == 0) {
		s->f_adc = clamp_t(unsigned int, f->frequency,
				bands[0].rangelow, bands[0].rangehigh);
		dev_dbg(s->dev, "ADC frequency=%u Hz\n", s->f_adc);
		ret = 0;
	} else if (f->tuner == 1) {
		s->f_rf = clamp_t(unsigned int, f->frequency,
				bands_rf[0].rangelow, bands_rf[0].rangehigh);
		dev_dbg(s->dev, "RF frequency=%u Hz\n", s->f_rf);
		buf[0] = (s->f_rf >>  0) & 0xff;
		buf[1] = (s->f_rf >>  8) & 0xff;
		buf[2] = (s->f_rf >> 16) & 0xff;
		buf[3] = (s->f_rf >> 24) & 0xff;
		ret = airspy_ctrl_msg(s, CMD_SET_FREQ, 0, 0, buf, 4);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int airspy_enum_freq_bands(struct file *file, void *priv,
		struct v4l2_frequency_band *band)
{
	int ret;

	if (band->tuner == 0) {
		if (band->index >= ARRAY_SIZE(bands)) {
			ret = -EINVAL;
		} else {
			*band = bands[band->index];
			ret = 0;
		}
	} else if (band->tuner == 1) {
		if (band->index >= ARRAY_SIZE(bands_rf)) {
			ret = -EINVAL;
		} else {
			*band = bands_rf[band->index];
			ret = 0;
		}
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static const struct v4l2_ioctl_ops airspy_ioctl_ops = {
	.vidioc_querycap          = airspy_querycap,

	.vidioc_enum_fmt_sdr_cap  = airspy_enum_fmt_sdr_cap,
	.vidioc_g_fmt_sdr_cap     = airspy_g_fmt_sdr_cap,
	.vidioc_s_fmt_sdr_cap     = airspy_s_fmt_sdr_cap,
	.vidioc_try_fmt_sdr_cap   = airspy_try_fmt_sdr_cap,

	.vidioc_reqbufs           = vb2_ioctl_reqbufs,
	.vidioc_create_bufs       = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf       = vb2_ioctl_prepare_buf,
	.vidioc_querybuf          = vb2_ioctl_querybuf,
	.vidioc_qbuf              = vb2_ioctl_qbuf,
	.vidioc_dqbuf             = vb2_ioctl_dqbuf,

	.vidioc_streamon          = vb2_ioctl_streamon,
	.vidioc_streamoff         = vb2_ioctl_streamoff,

	.vidioc_g_tuner           = airspy_g_tuner,
	.vidioc_s_tuner           = airspy_s_tuner,

	.vidioc_g_frequency       = airspy_g_frequency,
	.vidioc_s_frequency       = airspy_s_frequency,
	.vidioc_enum_freq_bands   = airspy_enum_freq_bands,

	.vidioc_subscribe_event   = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
	.vidioc_log_status        = v4l2_ctrl_log_status,
};

static const struct v4l2_file_operations airspy_fops = {
	.owner                    = THIS_MODULE,
	.open                     = v4l2_fh_open,
	.release                  = vb2_fop_release,
	.read                     = vb2_fop_read,
	.poll                     = vb2_fop_poll,
	.mmap                     = vb2_fop_mmap,
	.unlocked_ioctl           = video_ioctl2,
};

static const struct video_device airspy_template = {
	.name                     = "AirSpy SDR",
	.release                  = video_device_release_empty,
	.fops                     = &airspy_fops,
	.ioctl_ops                = &airspy_ioctl_ops,
};

static void airspy_video_release(struct v4l2_device *v)
{
	struct airspy *s = container_of(v, struct airspy, v4l2_dev);

	v4l2_ctrl_handler_free(&s->hdl);
	v4l2_device_unregister(&s->v4l2_dev);
	kfree(s->buf);
	kfree(s);
}

static int airspy_set_lna_gain(struct airspy *s)
{
	int ret;
	u8 u8tmp;

	dev_dbg(s->dev, "lna auto=%d->%d val=%d->%d\n",
			s->lna_gain_auto->cur.val, s->lna_gain_auto->val,
			s->lna_gain->cur.val, s->lna_gain->val);

	ret = airspy_ctrl_msg(s, CMD_SET_LNA_AGC, 0, s->lna_gain_auto->val,
			&u8tmp, 1);
	if (ret)
		goto err;

	if (s->lna_gain_auto->val == false) {
		ret = airspy_ctrl_msg(s, CMD_SET_LNA_GAIN, 0, s->lna_gain->val,
				&u8tmp, 1);
		if (ret)
			goto err;
	}
err:
	if (ret)
		dev_dbg(s->dev, "failed=%d\n", ret);

	return ret;
}

static int airspy_set_mixer_gain(struct airspy *s)
{
	int ret;
	u8 u8tmp;

	dev_dbg(s->dev, "mixer auto=%d->%d val=%d->%d\n",
			s->mixer_gain_auto->cur.val, s->mixer_gain_auto->val,
			s->mixer_gain->cur.val, s->mixer_gain->val);

	ret = airspy_ctrl_msg(s, CMD_SET_MIXER_AGC, 0, s->mixer_gain_auto->val,
			&u8tmp, 1);
	if (ret)
		goto err;

	if (s->mixer_gain_auto->val == false) {
		ret = airspy_ctrl_msg(s, CMD_SET_MIXER_GAIN, 0,
				s->mixer_gain->val, &u8tmp, 1);
		if (ret)
			goto err;
	}
err:
	if (ret)
		dev_dbg(s->dev, "failed=%d\n", ret);

	return ret;
}

static int airspy_set_if_gain(struct airspy *s)
{
	int ret;
	u8 u8tmp;

	dev_dbg(s->dev, "val=%d->%d\n", s->if_gain->cur.val, s->if_gain->val);

	ret = airspy_ctrl_msg(s, CMD_SET_VGA_GAIN, 0, s->if_gain->val,
			&u8tmp, 1);
	if (ret)
		dev_dbg(s->dev, "failed=%d\n", ret);

	return ret;
}

static int airspy_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct airspy *s = container_of(ctrl->handler, struct airspy, hdl);
	int ret;

	switch (ctrl->id) {
	case V4L2_CID_RF_TUNER_LNA_GAIN_AUTO:
	case V4L2_CID_RF_TUNER_LNA_GAIN:
		ret = airspy_set_lna_gain(s);
		break;
	case V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO:
	case V4L2_CID_RF_TUNER_MIXER_GAIN:
		ret = airspy_set_mixer_gain(s);
		break;
	case V4L2_CID_RF_TUNER_IF_GAIN:
		ret = airspy_set_if_gain(s);
		break;
	default:
		dev_dbg(s->dev, "unknown ctrl: id=%d name=%s\n",
				ctrl->id, ctrl->name);
		ret = -EINVAL;
	}

	return ret;
}

static const struct v4l2_ctrl_ops airspy_ctrl_ops = {
	.s_ctrl = airspy_s_ctrl,
};

static int airspy_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	struct airspy *s;
	int ret;
	u8 u8tmp, *buf;

	buf = NULL;
	ret = -ENOMEM;

	s = kzalloc(sizeof(struct airspy), GFP_KERNEL);
	if (s == NULL) {
		dev_err(&intf->dev, "Could not allocate memory for state\n");
		return -ENOMEM;
	}

	s->buf = kzalloc(BUF_SIZE, GFP_KERNEL);
	if (!s->buf)
		goto err_free_mem;
	buf = kzalloc(BUF_SIZE, GFP_KERNEL);
	if (!buf)
		goto err_free_mem;

	mutex_init(&s->v4l2_lock);
	mutex_init(&s->vb_queue_lock);
	spin_lock_init(&s->queued_bufs_lock);
	INIT_LIST_HEAD(&s->queued_bufs);
	s->dev = &intf->dev;
	s->udev = interface_to_usbdev(intf);
	s->f_adc = bands[0].rangelow;
	s->f_rf = bands_rf[0].rangelow;
	s->pixelformat = formats[0].pixelformat;
	s->buffersize = formats[0].buffersize;

	/* Detect device */
	ret = airspy_ctrl_msg(s, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1);
	if (ret == 0)
		ret = airspy_ctrl_msg(s, CMD_VERSION_STRING_READ, 0, 0,
				buf, BUF_SIZE);
	if (ret) {
		dev_err(s->dev, "Could not detect board\n");
		goto err_free_mem;
	}

	buf[BUF_SIZE - 1] = '\0';

	dev_info(s->dev, "Board ID: %02x\n", u8tmp);
	dev_info(s->dev, "Firmware version: %s\n", buf);

	/* Init videobuf2 queue structure */
	s->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
	s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	s->vb_queue.drv_priv = s;
	s->vb_queue.buf_struct_size = sizeof(struct airspy_frame_buf);
	s->vb_queue.ops = &airspy_vb2_ops;
	s->vb_queue.mem_ops = &vb2_vmalloc_memops;
	s->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	s->vb_queue.lock = &s->vb_queue_lock;
	ret = vb2_queue_init(&s->vb_queue);
	if (ret) {
		dev_err(s->dev, "Could not initialize vb2 queue\n");
		goto err_free_mem;
	}

	/* Init video_device structure */
	s->vdev = airspy_template;
	s->vdev.queue = &s->vb_queue;
	video_set_drvdata(&s->vdev, s);

	/* Register the v4l2_device structure */
	s->v4l2_dev.release = airspy_video_release;
	ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
	if (ret) {
		dev_err(s->dev, "Failed to register v4l2-device (%d)\n", ret);
		goto err_free_mem;
	}

	/* Register controls */
	v4l2_ctrl_handler_init(&s->hdl, 5);
	s->lna_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
			V4L2_CID_RF_TUNER_LNA_GAIN_AUTO, 0, 1, 1, 0);
	s->lna_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
			V4L2_CID_RF_TUNER_LNA_GAIN, 0, 14, 1, 8);
	v4l2_ctrl_auto_cluster(2, &s->lna_gain_auto, 0, false);
	s->mixer_gain_auto = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
			V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO, 0, 1, 1, 0);
	s->mixer_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
			V4L2_CID_RF_TUNER_MIXER_GAIN, 0, 15, 1, 8);
	v4l2_ctrl_auto_cluster(2, &s->mixer_gain_auto, 0, false);
	s->if_gain = v4l2_ctrl_new_std(&s->hdl, &airspy_ctrl_ops,
			V4L2_CID_RF_TUNER_IF_GAIN, 0, 15, 1, 0);
	if (s->hdl.error) {
		ret = s->hdl.error;
		dev_err(s->dev, "Could not initialize controls\n");
		goto err_free_controls;
	}

	v4l2_ctrl_handler_setup(&s->hdl);

	s->v4l2_dev.ctrl_handler = &s->hdl;
	s->vdev.v4l2_dev = &s->v4l2_dev;
	s->vdev.lock = &s->v4l2_lock;
	s->vdev.device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
			      V4L2_CAP_READWRITE | V4L2_CAP_TUNER;

	ret = video_register_device(&s->vdev, VFL_TYPE_SDR, -1);
	if (ret) {
		dev_err(s->dev, "Failed to register as video device (%d)\n",
				ret);
		goto err_free_controls;
	}

	/* Free buf if success */
	kfree(buf);

	dev_info(s->dev, "Registered as %s\n",
			video_device_node_name(&s->vdev));
	dev_notice(s->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
	return 0;

err_free_controls:
	v4l2_ctrl_handler_free(&s->hdl);
	v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
	kfree(buf);
	kfree(s->buf);
	kfree(s);
	return ret;
}

/* USB device ID list */
static const struct usb_device_id airspy_id_table[] = {
	{ USB_DEVICE(0x1d50, 0x60a1) }, /* AirSpy */
	{ }
};
MODULE_DEVICE_TABLE(usb, airspy_id_table);

/* USB subsystem interface */
static struct usb_driver airspy_driver = {
	.name                     = KBUILD_MODNAME,
	.probe                    = airspy_probe,
	.disconnect               = airspy_disconnect,
	.id_table                 = airspy_id_table,
};

module_usb_driver(airspy_driver);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("AirSpy SDR");
MODULE_LICENSE("GPL");
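The driver above accepts plain read() on its SDR device node: the file operations route read to vb2_fop_read() and the vb2 queue advertises VB2_READ, so no mmap buffer negotiation is required for a quick capture. The userspace sketch below is illustrative only; the node name /dev/swradio0 and the 100 MHz tuning are assumptions (the real node name is whatever the probe log line "Registered as ..." reports), and error handling is reduced to early returns.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	/* /dev/swradio0 is an assumption; check the probe message. */
	int fd = open("/dev/swradio0", O_RDONLY);
	unsigned char buf[65536];
	struct v4l2_frequency f;
	ssize_t n;

	if (fd < 0)
		return 1;

	memset(&f, 0, sizeof(f));
	f.tuner = 1;			/* RF tuner, see bands_rf[] above */
	f.type = V4L2_TUNER_RF;
	f.frequency = 100000000;	/* 100 MHz; V4L2_TUNER_CAP_1HZ => 1 Hz units */
	if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0)
		return 1;

	/* vb2_fop_read() streams V4L2_SDR_FMT_RU12LE sample data */
	n = read(fd, buf, sizeof(buf));
	printf("read %zd bytes of sample data\n", n);

	close(fd);
	return 0;
}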
// SPDX-License-Identifier: GPL-2.0
/*
 *  fs/partitions/amiga.c
 *
 *  Code extracted from drivers/block/genhd.c
 *
 *  Copyright (C) 1991-1998  Linus Torvalds
 *  Re-organised Feb 1998 Russell King
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/overflow.h>
#include <linux/affs_hardblocks.h>

#include "check.h"

/* magic offsets in partition DosEnvVec */
#define NR_HD	3
#define NR_SECT	5
#define LO_CYL	9
#define HI_CYL	10

static __inline__ u32
checksum_block(__be32 *m, int size)
{
	u32 sum = 0;

	while (size--)
		sum += be32_to_cpu(*m++);
	return sum;
}

int amiga_partition(struct parsed_partitions *state)
{
	Sector sect;
	unsigned char *data;
	struct RigidDiskBlock *rdb;
	struct PartitionBlock *pb;
	u64 start_sect, nr_sects;
	sector_t blk, end_sect;
	u32 cylblk;		/* rdb_CylBlocks = nr_heads*sect_per_track */
	u32 nr_hd, nr_sect, lo_cyl, hi_cyl;
	int part, res = 0;
	unsigned int blksize = 1;	/* Multiplier for disk block size */
	int slot = 1;

	for (blk = 0; ; blk++, put_dev_sector(sect)) {
		if (blk == RDB_ALLOCATION_LIMIT)
			goto rdb_done;
		data = read_part_sector(state, blk, &sect);
		if (!data) {
			pr_err("Dev %s: unable to read RDB block %llu\n",
			       state->disk->disk_name, blk);
			res = -1;
			goto rdb_done;
		}
		if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))
			continue;

		rdb = (struct RigidDiskBlock *)data;
		if (checksum_block((__be32 *)data,
				be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F) == 0)
			break;
		/* Try again with 0xdc..0xdf zeroed, Windows might have
		 * trashed it.
		 */
		*(__be32 *)(data + 0xdc) = 0;
		if (checksum_block((__be32 *)data,
				be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F) == 0) {
			pr_err("Trashed word at 0xdc in block %llu ignored in checksum calculation\n",
			       blk);
			break;
		}

		pr_err("Dev %s: RDB in block %llu has bad checksum\n",
		       state->disk->disk_name, blk);
	}

	/* blksize is blocks per 512 byte standard block */
	blksize = be32_to_cpu(rdb->rdb_BlockBytes) / 512;

	{
		char tmp[7 + 10 + 1 + 1];

		/* Be more informative */
		snprintf(tmp, sizeof(tmp), " RDSK (%d)", blksize * 512);
		strlcat(state->pp_buf, tmp, PAGE_SIZE);
	}
	blk = be32_to_cpu(rdb->rdb_PartitionList);
	put_dev_sector(sect);
	for (part = 1; (s32) blk > 0 && part <= 16; part++,
	     put_dev_sector(sect)) {
		/* Read in terms the partition table understands */
		if (check_mul_overflow(blk, (sector_t) blksize, &blk)) {
			pr_err("Dev %s: overflow calculating partition block %llu! Skipping partitions %u and beyond\n",
				state->disk->disk_name, blk, part);
			break;
		}
		data = read_part_sector(state, blk, &sect);
		if (!data) {
			pr_err("Dev %s: unable to read partition block %llu\n",
			       state->disk->disk_name, blk);
			res = -1;
			goto rdb_done;
		}
		pb  = (struct PartitionBlock *)data;
		blk = be32_to_cpu(pb->pb_Next);
		if (pb->pb_ID != cpu_to_be32(IDNAME_PARTITION))
			continue;
		if (checksum_block((__be32 *)pb,
				be32_to_cpu(pb->pb_SummedLongs) & 0x7F) != 0)
			continue;

		/* RDB gives us more than enough rope to hang ourselves with,
		 * many times over (2^128 bytes if all fields max out).
		 * Some careful checks are in order, so check for potential
		 * overflows.
		 * We are multiplying four 32 bit numbers to one sector_t!
		 */

		nr_hd   = be32_to_cpu(pb->pb_Environment[NR_HD]);
		nr_sect = be32_to_cpu(pb->pb_Environment[NR_SECT]);

		/* CylBlocks is total number of blocks per cylinder */
		if (check_mul_overflow(nr_hd, nr_sect, &cylblk)) {
			pr_err("Dev %s: heads*sects %u overflows u32, skipping partition!\n",
				state->disk->disk_name, cylblk);
			continue;
		}

		/* check for consistency with RDB defined CylBlocks */
		if (cylblk > be32_to_cpu(rdb->rdb_CylBlocks)) {
			pr_warn("Dev %s: cylblk %u > rdb_CylBlocks %u!\n",
				state->disk->disk_name, cylblk,
				be32_to_cpu(rdb->rdb_CylBlocks));
		}

		/* RDB allows for variable logical block size -
		 * normalize to 512 byte blocks and check result.
		 */

		if (check_mul_overflow(cylblk, blksize, &cylblk)) {
			pr_err("Dev %s: partition %u bytes per cyl. overflows u32, skipping partition!\n",
				state->disk->disk_name, part);
			continue;
		}

		/* Calculate partition start and end. Limit of 32 bit on cylblk
		 * guarantees no overflow occurs if LBD support is enabled.
		 */

		lo_cyl = be32_to_cpu(pb->pb_Environment[LO_CYL]);
		start_sect = ((u64) lo_cyl * cylblk);

		hi_cyl = be32_to_cpu(pb->pb_Environment[HI_CYL]);
		nr_sects = (((u64) hi_cyl - lo_cyl + 1) * cylblk);

		if (!nr_sects)
			continue;

		/* Warn user if partition end overflows u32 (AmigaDOS limit) */

		if ((start_sect + nr_sects) > UINT_MAX) {
			pr_warn("Dev %s: partition %u (%llu-%llu) needs 64 bit device support!\n",
				state->disk->disk_name, part,
				start_sect, start_sect + nr_sects);
		}

		if (check_add_overflow(start_sect, nr_sects, &end_sect)) {
			pr_err("Dev %s: partition %u (%llu-%llu) needs LBD device support, skipping partition!\n",
				state->disk->disk_name, part,
				start_sect, end_sect);
			continue;
		}

		/* Tell Kernel about it */

		put_partition(state, slot++, start_sect, nr_sects);
		{
			/* Be even more informative to aid mounting */
			char dostype[4];
			char tmp[42];

			__be32 *dt = (__be32 *)dostype;
			*dt = pb->pb_Environment[16];
			if (dostype[3] < ' ')
				snprintf(tmp, sizeof(tmp), " (%c%c%c^%c)",
					dostype[0], dostype[1],
					dostype[2], dostype[3] + '@');
			else
				snprintf(tmp, sizeof(tmp), " (%c%c%c%c)",
					dostype[0], dostype[1],
					dostype[2], dostype[3]);
			strlcat(state->pp_buf, tmp, PAGE_SIZE);
			snprintf(tmp, sizeof(tmp), "(res %d spb %d)",
				be32_to_cpu(pb->pb_Environment[6]),
				be32_to_cpu(pb->pb_Environment[4]));
			strlcat(state->pp_buf, tmp, PAGE_SIZE);
		}
		res = 1;
	}
	strlcat(state->pp_buf, "\n", PAGE_SIZE);

rdb_done:
	return res;
}
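The RDB scan above reduces to a single arithmetic rule: summing the block's first rdb_SummedLongs big-endian 32-bit words, one of which is the stored checksum, must give zero modulo 2^32. A minimal standalone sketch of that rule follows; the 4-word block is hypothetical, not a real RigidDiskBlock layout.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() serve as be32 conversions here */

/* Same rule as checksum_block() above: the sum of the first 'size'
 * big-endian longwords of a valid block is 0 (mod 2^32). */
static uint32_t checksum_block(const uint32_t *m, int size)
{
	uint32_t sum = 0;

	while (size--)
		sum += ntohl(*m++);
	return sum;
}

int main(void)
{
	/* Hypothetical 4-word block: the third word is chosen so the
	 * total wraps to zero, 1 + 2 + 0xFFFFFFFA + 3 == 0 (mod 2^32). */
	uint32_t blk[4] = { htonl(1), htonl(2), htonl(0xFFFFFFFA), htonl(3) };

	printf("checksum=%u (0 means valid)\n",
	       (unsigned)checksum_block(blk, 4));
	return 0;
}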
// SPDX-License-Identifier: GPL-2.0
/*
 * security/tomoyo/gc.c
 *
 * Copyright (C) 2005-2011  NTT DATA CORPORATION
 */

#include "common.h"
#include <linux/kthread.h>
#include <linux/slab.h>

/**
 * tomoyo_memory_free - Free memory for elements.
 *
 * @ptr:  Pointer to allocated memory.
 *
 * Returns nothing.
 *
 * Caller holds tomoyo_policy_lock mutex.
 */
static inline void tomoyo_memory_free(void *ptr)
{
	tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr);
	kfree(ptr);
}

/* The list for "struct tomoyo_io_buffer". */
static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);

/**
 * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns true if @element is used by /sys/kernel/security/tomoyo/ users,
 * false otherwise.
 */
static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
{
	struct tomoyo_io_buffer *head;
	bool in_use = false;

	spin_lock(&tomoyo_io_buffer_list_lock);
	list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
		head->users++;
		spin_unlock(&tomoyo_io_buffer_list_lock);
		mutex_lock(&head->io_sem);
		if (head->r.domain == element || head->r.group == element ||
		    head->r.acl == element ||
		    &head->w.domain->list == element)
			in_use = true;
		mutex_unlock(&head->io_sem);
		spin_lock(&tomoyo_io_buffer_list_lock);
		head->users--;
		if (in_use)
			break;
	}
	spin_unlock(&tomoyo_io_buffer_list_lock);
	return in_use;
}

/**
 * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
 *
 * @string: String to check.
 *
 * Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
 * false otherwise.
 */
static bool tomoyo_name_used_by_io_buffer(const char *string)
{
	struct tomoyo_io_buffer *head;
	const size_t size = strlen(string) + 1;
	bool in_use = false;

	spin_lock(&tomoyo_io_buffer_list_lock);
	list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
		int i;

		head->users++;
		spin_unlock(&tomoyo_io_buffer_list_lock);
		mutex_lock(&head->io_sem);
		for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
			const char *w = head->r.w[i];

			if (w < string || w > string + size)
				continue;
			in_use = true;
			break;
		}
		mutex_unlock(&head->io_sem);
		spin_lock(&tomoyo_io_buffer_list_lock);
		head->users--;
		if (in_use)
			break;
	}
	spin_unlock(&tomoyo_io_buffer_list_lock);
	return in_use;
}

/**
 * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void tomoyo_del_transition_control(struct list_head *element)
{
	struct tomoyo_transition_control *ptr =
		container_of(element, typeof(*ptr), head.list);

	tomoyo_put_name(ptr->domainname);
	tomoyo_put_name(ptr->program);
}

/**
 * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void tomoyo_del_aggregator(struct list_head *element)
{
	struct tomoyo_aggregator *ptr =
		container_of(element, typeof(*ptr), head.list);

	tomoyo_put_name(ptr->original_name);
	tomoyo_put_name(ptr->aggregated_name);
}

/**
 * tomoyo_del_manager - Delete members in "struct tomoyo_manager".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void tomoyo_del_manager(struct list_head *element)
{
	struct tomoyo_manager *ptr =
		container_of(element, typeof(*ptr), head.list);

	tomoyo_put_name(ptr->manager);
}

/**
 * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_acl(struct list_head *element)
{
	struct tomoyo_acl_info *acl =
		container_of(element, typeof(*acl), list);

	tomoyo_put_condition(acl->cond);
	switch (acl->type) {
	case TOMOYO_TYPE_PATH_ACL:
		{
			struct tomoyo_path_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name_union(&entry->name);
		}
		break;
	case TOMOYO_TYPE_PATH2_ACL:
		{
			struct tomoyo_path2_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name_union(&entry->name1);
			tomoyo_put_name_union(&entry->name2);
		}
		break;
	case TOMOYO_TYPE_PATH_NUMBER_ACL:
		{
			struct tomoyo_path_number_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name_union(&entry->name);
			tomoyo_put_number_union(&entry->number);
		}
		break;
	case TOMOYO_TYPE_MKDEV_ACL:
		{
			struct tomoyo_mkdev_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name_union(&entry->name);
			tomoyo_put_number_union(&entry->mode);
			tomoyo_put_number_union(&entry->major);
			tomoyo_put_number_union(&entry->minor);
		}
		break;
	case TOMOYO_TYPE_MOUNT_ACL:
		{
			struct tomoyo_mount_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name_union(&entry->dev_name);
			tomoyo_put_name_union(&entry->dir_name);
			tomoyo_put_name_union(&entry->fs_type);
			tomoyo_put_number_union(&entry->flags);
		}
		break;
	case TOMOYO_TYPE_ENV_ACL:
		{
			struct tomoyo_env_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name(entry->env);
		}
		break;
	case TOMOYO_TYPE_INET_ACL:
		{
			struct tomoyo_inet_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_group(entry->address.group);
			tomoyo_put_number_union(&entry->port);
		}
		break;
	case TOMOYO_TYPE_UNIX_ACL:
		{
			struct tomoyo_unix_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name_union(&entry->name);
		}
		break;
	case TOMOYO_TYPE_MANUAL_TASK_ACL:
		{
			struct tomoyo_task_acl *entry =
				container_of(acl, typeof(*entry), head);

			tomoyo_put_name(entry->domainname);
		}
		break;
	}
}

/**
 * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * Caller holds tomoyo_policy_lock mutex.
 */
static inline void tomoyo_del_domain(struct list_head *element)
{
	struct tomoyo_domain_info *domain =
		container_of(element, typeof(*domain), list);
	struct tomoyo_acl_info *acl;
	struct tomoyo_acl_info *tmp;

	/*
	 * Since this domain is referenced from neither
	 * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
	 * elements without checking for is_deleted flag.
	 */
	list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
		tomoyo_del_acl(&acl->list);
		tomoyo_memory_free(acl);
	}
	tomoyo_put_name(domain->domainname);
}

/**
 * tomoyo_del_condition - Delete members in "struct tomoyo_condition".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
void tomoyo_del_condition(struct list_head *element)
{
	struct tomoyo_condition *cond = container_of(element, typeof(*cond),
						     head.list);
	const u16 condc = cond->condc;
	const u16 numbers_count = cond->numbers_count;
	const u16 names_count = cond->names_count;
	const u16 argc = cond->argc;
	const u16 envc = cond->envc;
	unsigned int i;
	const struct tomoyo_condition_element *condp
		= (const struct tomoyo_condition_element *) (cond + 1);
	struct tomoyo_number_union *numbers_p
		= (struct tomoyo_number_union *) (condp + condc);
	struct tomoyo_name_union *names_p
		= (struct tomoyo_name_union *) (numbers_p + numbers_count);
	const struct tomoyo_argv *argv
		= (const struct tomoyo_argv *) (names_p + names_count);
	const struct tomoyo_envp *envp
		= (const struct tomoyo_envp *) (argv + argc);

	for (i = 0; i < numbers_count; i++)
		tomoyo_put_number_union(numbers_p++);
	for (i = 0; i < names_count; i++)
		tomoyo_put_name_union(names_p++);
	for (i = 0; i < argc; argv++, i++)
		tomoyo_put_name(argv->value);
	for (i = 0; i < envc; envp++, i++) {
		tomoyo_put_name(envp->name);
		tomoyo_put_name(envp->value);
	}
}

/**
 * tomoyo_del_name - Delete members in "struct tomoyo_name".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void tomoyo_del_name(struct list_head *element)
{
	/* Nothing to do. */
}

/**
 * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void tomoyo_del_path_group(struct list_head *element)
{
	struct tomoyo_path_group *member =
		container_of(element, typeof(*member), head.list);

	tomoyo_put_name(member->member_name);
}

/**
 * tomoyo_del_group - Delete "struct tomoyo_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void tomoyo_del_group(struct list_head *element)
{
	struct tomoyo_group *group =
		container_of(element, typeof(*group), head.list);

	tomoyo_put_name(group->group_name);
}

/**
 * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void tomoyo_del_address_group(struct list_head *element)
{
	/* Nothing to do. */
}

/**
 * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static inline void tomoyo_del_number_group(struct list_head *element)
{
	/* Nothing to do. */
}

/**
 * tomoyo_try_to_gc - Try to kfree() an entry.
 *
 * @type:    One of values in "enum tomoyo_policy_id".
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 *
 * Caller holds tomoyo_policy_lock mutex.
 */
static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
			     struct list_head *element)
{
	/*
	 * __list_del_entry() guarantees that the list element became no longer
	 * reachable from the list which the element was originally on (e.g.
	 * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the
	 * list element became no longer referenced by syscall users.
	 */
	__list_del_entry(element);
	mutex_unlock(&tomoyo_policy_lock);
	synchronize_srcu(&tomoyo_ss);
	/*
	 * However, there are two users which may still be using the list
	 * element. We need to defer until both users forget this element.
	 *
	 * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
	 * and "struct tomoyo_io_buffer"->w.domain forget this element.
	 */
	if (tomoyo_struct_used_by_io_buffer(element))
		goto reinject;
	switch (type) {
	case TOMOYO_ID_TRANSITION_CONTROL:
		tomoyo_del_transition_control(element);
		break;
	case TOMOYO_ID_MANAGER:
		tomoyo_del_manager(element);
		break;
	case TOMOYO_ID_AGGREGATOR:
		tomoyo_del_aggregator(element);
		break;
	case TOMOYO_ID_GROUP:
		tomoyo_del_group(element);
		break;
	case TOMOYO_ID_PATH_GROUP:
		tomoyo_del_path_group(element);
		break;
	case TOMOYO_ID_ADDRESS_GROUP:
		tomoyo_del_address_group(element);
		break;
	case TOMOYO_ID_NUMBER_GROUP:
		tomoyo_del_number_group(element);
		break;
	case TOMOYO_ID_CONDITION:
		tomoyo_del_condition(element);
		break;
	case TOMOYO_ID_NAME:
		/*
		 * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
		 * forget this element.
		 */
		if (tomoyo_name_used_by_io_buffer
		    (container_of(element, typeof(struct tomoyo_name),
				  head.list)->entry.name))
			goto reinject;
		tomoyo_del_name(element);
		break;
	case TOMOYO_ID_ACL:
		tomoyo_del_acl(element);
		break;
	case TOMOYO_ID_DOMAIN:
		/*
		 * Don't kfree() until all "struct cred"->security forget this
		 * element.
		 */
		if (atomic_read(&container_of
				(element, typeof(struct tomoyo_domain_info),
				 list)->users))
			goto reinject;
		break;
	case TOMOYO_MAX_POLICY:
		break;
	}
	mutex_lock(&tomoyo_policy_lock);
	if (type == TOMOYO_ID_DOMAIN)
		tomoyo_del_domain(element);
	tomoyo_memory_free(element);
	return;
reinject:
	/*
	 * We can safely reinject this element here because
	 * (1) Appending list elements and removing list elements are protected
	 *     by tomoyo_policy_lock mutex.
	 * (2) Only this function removes list elements and this function is
	 *     exclusively executed by tomoyo_gc_mutex mutex.
	 * are true.
	 */
	mutex_lock(&tomoyo_policy_lock);
	list_add_rcu(element, element->prev);
}

/**
 * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head".
 *
 * @id:          One of values in "enum tomoyo_policy_id".
 * @member_list: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_collect_member(const enum tomoyo_policy_id id,
				  struct list_head *member_list)
{
	struct tomoyo_acl_head *member;
	struct tomoyo_acl_head *tmp;

	list_for_each_entry_safe(member, tmp, member_list, list) {
		if (!member->is_deleted)
			continue;
		member->is_deleted = TOMOYO_GC_IN_PROGRESS;
		tomoyo_try_to_gc(id, &member->list);
	}
}

/**
 * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info".
 *
 * @list: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_collect_acl(struct list_head *list)
{
	struct tomoyo_acl_info *acl;
	struct tomoyo_acl_info *tmp;

	list_for_each_entry_safe(acl, tmp, list, list) {
		if (!acl->is_deleted)
			continue;
		acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
		tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
	}
}

/**
 * tomoyo_collect_entry - Try to kfree() deleted elements.
 *
 * Returns nothing.
 */
static void tomoyo_collect_entry(void)
{
	int i;
	enum tomoyo_policy_id id;
	struct tomoyo_policy_namespace *ns;

	mutex_lock(&tomoyo_policy_lock);
	{
		struct tomoyo_domain_info *domain;
		struct tomoyo_domain_info *tmp;

		list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
					 list) {
			tomoyo_collect_acl(&domain->acl_info_list);
			if (!domain->is_deleted ||
			    atomic_read(&domain->users))
				continue;
			tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
		}
	}
	list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
		for (id = 0; id < TOMOYO_MAX_POLICY; id++)
			tomoyo_collect_member(id, &ns->policy_list[id]);
		for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
			tomoyo_collect_acl(&ns->acl_group[i]);
	}
	{
		struct tomoyo_shared_acl_head *ptr;
		struct tomoyo_shared_acl_head *tmp;

		list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
					 list) {
			if (atomic_read(&ptr->users) > 0)
				continue;
			atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
			tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
		}
	}
	list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
		for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
			struct list_head *list = &ns->group_list[i];
			struct tomoyo_group *group;
			struct tomoyo_group *tmp;

			switch (i) {
			case 0:
				id = TOMOYO_ID_PATH_GROUP;
				break;
			case 1:
				id = TOMOYO_ID_NUMBER_GROUP;
				break;
			default:
				id = TOMOYO_ID_ADDRESS_GROUP;
				break;
			}
			list_for_each_entry_safe(group, tmp, list, head.list) {
				tomoyo_collect_member(id, &group->member_list);
				if (!list_empty(&group->member_list) ||
				    atomic_read(&group->head.users) > 0)
					continue;
				atomic_set(&group->head.users,
					   TOMOYO_GC_IN_PROGRESS);
				tomoyo_try_to_gc(TOMOYO_ID_GROUP,
						 &group->head.list);
			}
		}
	}
	for (i = 0; i < TOMOYO_MAX_HASH; i++) {
		struct list_head *list = &tomoyo_name_list[i];
		struct tomoyo_shared_acl_head *ptr;
		struct tomoyo_shared_acl_head *tmp;

		list_for_each_entry_safe(ptr, tmp, list, list) {
			if (atomic_read(&ptr->users) > 0)
				continue;
			atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
			tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
		}
	}
	mutex_unlock(&tomoyo_policy_lock);
}

/**
 * tomoyo_gc_thread - Garbage collector thread function.
 *
 * @unused: Unused.
 *
 * Returns 0.
 */
static int tomoyo_gc_thread(void *unused)
{
	/* Garbage collector thread is exclusive. */
	static DEFINE_MUTEX(tomoyo_gc_mutex);

	if (!mutex_trylock(&tomoyo_gc_mutex))
		goto out;

	tomoyo_collect_entry();
	{
		struct tomoyo_io_buffer *head;
		struct tomoyo_io_buffer *tmp;

		spin_lock(&tomoyo_io_buffer_list_lock);
		list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list,
					 list) {
			if (head->users)
				continue;
			list_del(&head->list);
			kfree(head->read_buf);
			kfree(head->write_buf);
			kfree(head);
		}
		spin_unlock(&tomoyo_io_buffer_list_lock);
	}
	mutex_unlock(&tomoyo_gc_mutex);
out:
	/* This acts as do_exit(0). */
	return 0;
}

/**
 * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users.
 *
 * @head:        Pointer to "struct tomoyo_io_buffer".
 * @is_register: True if register, false if unregister.
 *
 * Returns nothing.
 */
void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register)
{
	bool is_write = false;

	spin_lock(&tomoyo_io_buffer_list_lock);
	if (is_register) {
		head->users = 1;
		list_add(&head->list, &tomoyo_io_buffer_list);
	} else {
		is_write = head->write_buf != NULL;
		if (!--head->users) {
			list_del(&head->list);
			kfree(head->read_buf);
			kfree(head->write_buf);
			kfree(head);
		}
	}
	spin_unlock(&tomoyo_io_buffer_list_lock);
	if (is_write)
		kthread_run(tomoyo_gc_thread, NULL, "GC for TOMOYO");
}
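The collector above leans entirely on one deferred-free pattern: unlink the element while holding the policy mutex, drop the mutex, let one SRCU grace period elapse so no /sys/kernel/security/tomoyo/ reader can still reach the element, then kfree() it, or reinject it with list_add_rcu() at its old position (which __list_del_entry() left intact in element->prev) if a longer-lived user remains. Below is a condensed sketch of just that pattern; the my_* names are hypothetical stand-ins, not TOMOYO's actual types, and the locking shape mirrors tomoyo_try_to_gc().

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct my_item {
	struct list_head list;
	int busy;		/* stand-in for head->users / domain->users */
};

static LIST_HEAD(my_list);
static DEFINE_MUTEX(my_lock);	/* plays the role of tomoyo_policy_lock */
DEFINE_STATIC_SRCU(my_srcu);	/* plays the role of tomoyo_ss */

/* Readers walk the list under SRCU, like the securityfs interface does. */
static int my_reader(void)
{
	struct my_item *p;
	int idx = srcu_read_lock(&my_srcu);
	int n = 0;

	list_for_each_entry_rcu(p, &my_list, list)
		n++;		/* look, but never free */
	srcu_read_unlock(&my_srcu, idx);
	return n;
}

/* Writer side; caller holds my_lock, and, as with tomoyo_gc_mutex, only
 * one garbage-collecting writer may run at a time. */
static void my_try_to_gc(struct my_item *p)
{
	/* Unlink only; p->list.prev/next stay valid for a possible reinject. */
	__list_del_entry(&p->list);
	mutex_unlock(&my_lock);
	synchronize_srcu(&my_srcu);	/* now no reader can still see p */
	if (READ_ONCE(p->busy)) {
		/* Still referenced elsewhere: put it back and retry later. */
		mutex_lock(&my_lock);
		list_add_rcu(&p->list, p->list.prev);
		return;
	}
	mutex_lock(&my_lock);
	kfree(p);
}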
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC recvmsg() implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Post a call for attention by the socket or kernel service. Further
 * notifications are suppressed by putting recvmsg_link on a dummy queue.
 */
void rxrpc_notify_socket(struct rxrpc_call *call)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%d", call->debug_id);

	if (!list_empty(&call->recvmsg_link))
		return;

	if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
		rxrpc_see_call(call, rxrpc_call_see_notify_released);
		return;
	}

	rcu_read_lock();

	rx = rcu_dereference(call->socket);
	sk = &rx->sk;
	if (rx && sk->sk_state < RXRPC_CLOSE) {
		if (call->notify_rx) {
			spin_lock_irq(&call->notify_lock);
			call->notify_rx(sk, call, call->user_call_ID);
			spin_unlock_irq(&call->notify_lock);
		} else {
			spin_lock_irq(&rx->recvmsg_lock);
			if (list_empty(&call->recvmsg_link)) {
				rxrpc_get_call(call, rxrpc_call_get_notify_socket);
				list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
			}
			spin_unlock_irq(&rx->recvmsg_lock);

			if (!sock_flag(sk, SOCK_DEAD)) {
				_debug("call %ps", sk->sk_data_ready);
				sk->sk_data_ready(sk);
			}
		}
	}

	rcu_read_unlock();
	_leave("");
}

/*
 * Pass a call terminating message to userspace.
 */
static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
	u32 tmp = 0;
	int ret;

	switch (call->completion) {
	case RXRPC_CALL_SUCCEEDED:
		ret = 0;
		if (rxrpc_is_service_call(call))
			ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
		tmp = call->abort_code;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
		break;
	case RXRPC_CALL_LOCALLY_ABORTED:
		tmp = call->abort_code;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
		break;
	case RXRPC_CALL_NETWORK_ERROR:
		tmp = -call->error;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
		break;
	case RXRPC_CALL_LOCAL_ERROR:
		tmp = -call->error;
		ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
		break;
	default:
		pr_err("Invalid terminal call state %u\n", call->completion);
		BUG();
		break;
	}

	trace_rxrpc_recvdata(call, rxrpc_recvmsg_terminal,
			     call->ackr_window - 1,
			     call->rx_pkt_offset, call->rx_pkt_len, ret);
	return ret;
}

/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_serial_t serial;
	rxrpc_seq_t old_consumed = call->rx_consumed, tseq;
	bool last;
	int acked;

	_enter("%d", call->debug_id);

	skb = skb_dequeue(&call->recvmsg_queue);
	rxrpc_see_skb(skb, rxrpc_skb_see_rotate);

	sp = rxrpc_skb(skb);
	tseq   = sp->hdr.seq;
	serial = sp->hdr.serial;
	last   = sp->hdr.flags & RXRPC_LAST_PACKET;

	/* Barrier against rxrpc_input_data(). */
	if (after(tseq, call->rx_consumed))
		smp_store_release(&call->rx_consumed, tseq);

	rxrpc_free_skb(skb, rxrpc_skb_put_rotate);

	trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last :
			    rxrpc_receive_rotate, serial, call->rx_consumed);

	if (last)
		set_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags);

	/* Check to see if there's an ACK that needs sending. */
	acked = atomic_add_return(call->rx_consumed - old_consumed,
				  &call->ackr_nr_consumed);
	if (acked > 8 &&
	    !test_and_set_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
		rxrpc_poke_call(call, rxrpc_call_poke_idle);
}

/*
 * Decrypt and verify a DATA packet.
 */
static int rxrpc_verify_data(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	if (sp->flags & RXRPC_RX_VERIFIED)
		return 0;
	return call->security->verify_packet(call, skb);
}

/*
 * Transcribe a call's user ID to a control message.
 */
static int rxrpc_recvmsg_user_id(struct rxrpc_call *call, struct msghdr *msg,
				 int flags)
{
	if (!test_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		return 0;

	if (flags & MSG_CMSG_COMPAT) {
		unsigned int id32 = call->user_call_ID;

		return put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				sizeof(unsigned int), &id32);
	} else {
		unsigned long idl = call->user_call_ID;

		return put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
				sizeof(unsigned long), &idl);
	}
}

/*
 * Deal with a CHALLENGE packet.
 */
static int rxrpc_recvmsg_challenge(struct socket *sock, struct msghdr *msg,
				   struct sk_buff *challenge,
				   unsigned int flags)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(challenge);
	struct rxrpc_connection *conn = sp->chall.conn;

	return conn->security->challenge_to_recvmsg(conn, challenge, msg);
}

/*
 * Process OOB packets. Called with the socket locked.
 */
static int rxrpc_recvmsg_oob(struct socket *sock, struct msghdr *msg,
			     unsigned int flags)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	bool need_response = false;
	int ret;

	skb = skb_peek(&rx->recvmsg_oobq);
	if (!skb)
		return -EAGAIN;
	rxrpc_see_skb(skb, rxrpc_skb_see_recvmsg);

	ret = put_cmsg(msg, SOL_RXRPC, RXRPC_OOB_ID, sizeof(u64),
		       &skb->skb_mstamp_ns);
	if (ret < 0)
		return ret;

	switch ((enum rxrpc_oob_type)skb->mark) {
	case RXRPC_OOB_CHALLENGE:
		need_response = true;
		ret = rxrpc_recvmsg_challenge(sock, msg, skb, flags);
		break;
	default:
		WARN_ONCE(1, "recvmsg() can't process unknown OOB type %u\n",
			  skb->mark);
		ret = -EIO;
		break;
	}

	if (!(flags & MSG_PEEK))
		skb_unlink(skb, &rx->recvmsg_oobq);
	if (need_response)
		rxrpc_add_pending_oob(rx, skb);
	else
		rxrpc_free_skb(skb, rxrpc_skb_put_oob);

	return ret;
}

/*
 * Deliver messages to a call. This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1). If more packets are required, it returns -EAGAIN and if the
 * call has failed it returns -EIO.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
			      struct msghdr *msg, struct iov_iter *iter,
			      size_t len, int flags, size_t *_offset)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct sk_buff *skb;
	rxrpc_seq_t seq = 0;
	size_t remain;
	unsigned int rx_pkt_offset, rx_pkt_len;
	int copy, ret = -EAGAIN, ret2;

	rx_pkt_offset = call->rx_pkt_offset;
	rx_pkt_len = call->rx_pkt_len;

	if (rxrpc_call_has_failed(call)) {
		seq = call->ackr_window - 1;
		ret = -EIO;
		goto done;
	}

	if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) {
		seq = call->ackr_window - 1;
		ret = 1;
		goto done;
	}

	/* No one else can be removing stuff from the queue, so we shouldn't
	 * need the Rx lock to walk it.
	 */
	skb = skb_peek(&call->recvmsg_queue);
	while (skb) {
		rxrpc_see_skb(skb, rxrpc_skb_see_recvmsg);
		sp = rxrpc_skb(skb);
		seq = sp->hdr.seq;

		if (!(flags & MSG_PEEK))
			trace_rxrpc_receive(call, rxrpc_receive_front,
					    sp->hdr.serial, seq);

		if (msg)
			sock_recv_timestamp(msg, sock->sk, skb);

		if (rx_pkt_offset == 0) {
			ret2 = rxrpc_verify_data(call, skb);
			trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq,
					     sp->offset, sp->len, ret2);
			if (ret2 < 0) {
				ret = ret2;
				goto out;
			}
			rx_pkt_offset = sp->offset;
			rx_pkt_len = sp->len;
		} else {
			trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq,
					     rx_pkt_offset, rx_pkt_len, 0);
		}

		/* We have to handle short, empty and used-up DATA packets. */
		remain = len - *_offset;
		copy = rx_pkt_len;
		if (copy > remain)
			copy = remain;
		if (copy > 0) {
			ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
						      copy);
			if (ret2 < 0) {
				ret = ret2;
				goto out;
			}

			/* handle piecemeal consumption of data packets */
			rx_pkt_offset += copy;
			rx_pkt_len -= copy;
			*_offset += copy;
		}

		if (rx_pkt_len > 0) {
			trace_rxrpc_recvdata(call, rxrpc_recvmsg_full, seq,
					     rx_pkt_offset, rx_pkt_len, 0);
			ASSERTCMP(*_offset, ==, len);
			ret = 0;
			break;
		}

		/* The whole packet has been transferred. */
		if (sp->hdr.flags & RXRPC_LAST_PACKET)
			ret = 1;
		rx_pkt_offset = 0;
		rx_pkt_len = 0;

		skb = skb_peek_next(skb, &call->recvmsg_queue);

		if (!(flags & MSG_PEEK))
			rxrpc_rotate_rx_window(call);

		if (!rx->app_ops &&
		    !skb_queue_empty_lockless(&rx->recvmsg_oobq)) {
			trace_rxrpc_recvdata(call, rxrpc_recvmsg_oobq, seq,
					     rx_pkt_offset, rx_pkt_len, ret);
			break;
		}
	}

out:
	if (!(flags & MSG_PEEK)) {
		call->rx_pkt_offset = rx_pkt_offset;
		call->rx_pkt_len = rx_pkt_len;
	}
done:
	trace_rxrpc_recvdata(call, rxrpc_recvmsg_data_return, seq,
			     rx_pkt_offset, rx_pkt_len, ret);
	if (ret == -EAGAIN)
		set_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
	return ret;
}

/*
 * Receive a message from an RxRPC socket
 * - we need to be careful about two or more threads calling recvmsg
 *   simultaneously
 */
int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		  int flags)
{
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct list_head *l;
	unsigned int call_debug_id = 0;
	size_t copied = 0;
	long timeo;
	int ret;

	DEFINE_WAIT(wait);

	trace_rxrpc_recvmsg(0, rxrpc_recvmsg_enter, 0);

	if (flags & (MSG_OOB | MSG_TRUNC))
		return -EOPNOTSUPP;

	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);

try_again:
	lock_sock(&rx->sk);

	/* Return immediately if a client socket has no outstanding calls */
	if (RB_EMPTY_ROOT(&rx->calls) &&
	    list_empty(&rx->recvmsg_q) &&
	    skb_queue_empty_lockless(&rx->recvmsg_oobq) &&
	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
		release_sock(&rx->sk);
		return -EAGAIN;
	}

	if (list_empty(&rx->recvmsg_q)) {
		ret = -EWOULDBLOCK;
		if (timeo == 0) {
			call = NULL;
			goto error_no_call;
		}

		release_sock(&rx->sk);

		/* Wait for something to happen */
		prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
					  TASK_INTERRUPTIBLE);
		ret = sock_error(&rx->sk);
		if (ret)
			goto wait_error;

		if (list_empty(&rx->recvmsg_q) &&
		    skb_queue_empty_lockless(&rx->recvmsg_oobq)) {
			if (signal_pending(current))
				goto wait_interrupted;
			trace_rxrpc_recvmsg(0, rxrpc_recvmsg_wait, 0);
			timeo = schedule_timeout(timeo);
		}
		finish_wait(sk_sleep(&rx->sk), &wait);
		goto try_again;
	}

	/* Deal with OOB messages before we consider getting normal data. */
	if (!skb_queue_empty_lockless(&rx->recvmsg_oobq)) {
		ret = rxrpc_recvmsg_oob(sock, msg, flags);
		release_sock(&rx->sk);
		if (ret == -EAGAIN)
			goto try_again;
		goto error_no_call;
	}

	/* Find the next call and dequeue it if we're not just peeking. If we
	 * do dequeue it, that comes with a ref that we will need to release.
	 * We also want to weed out calls that got requeued whilst we were
	 * shovelling data out.
*/ spin_lock_irq(&rx->recvmsg_lock); l = rx->recvmsg_q.next; call = list_entry(l, struct rxrpc_call, recvmsg_link); if (!rxrpc_call_is_complete(call) && skb_queue_empty(&call->recvmsg_queue) && skb_queue_empty(&rx->recvmsg_oobq)) { list_del_init(&call->recvmsg_link); spin_unlock_irq(&rx->recvmsg_lock); release_sock(&rx->sk); trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_unqueue, 0); rxrpc_put_call(call, rxrpc_call_put_recvmsg); goto try_again; } rxrpc_see_call(call, rxrpc_call_see_recvmsg); if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) { rxrpc_see_call(call, rxrpc_call_see_already_released); list_del_init(&call->recvmsg_link); spin_unlock_irq(&rx->recvmsg_lock); release_sock(&rx->sk); trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_unqueue, 0); rxrpc_put_call(call, rxrpc_call_put_recvmsg); goto try_again; } if (!(flags & MSG_PEEK)) list_del_init(&call->recvmsg_link); else rxrpc_get_call(call, rxrpc_call_get_recvmsg); spin_unlock_irq(&rx->recvmsg_lock); call_debug_id = call->debug_id; trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_dequeue, 0); /* We're going to drop the socket lock, so we need to lock the call * against interference by sendmsg. */ if (!mutex_trylock(&call->user_mutex)) { ret = -EWOULDBLOCK; if (flags & MSG_DONTWAIT) goto error_requeue_call; ret = -ERESTARTSYS; if (mutex_lock_interruptible(&call->user_mutex) < 0) goto error_requeue_call; } release_sock(&rx->sk); if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) { rxrpc_see_call(call, rxrpc_call_see_already_released); mutex_unlock(&call->user_mutex); if (!(flags & MSG_PEEK)) rxrpc_put_call(call, rxrpc_call_put_recvmsg); goto try_again; } ret = rxrpc_recvmsg_user_id(call, msg, flags); if (ret < 0) goto error_unlock_call; if (msg->msg_name && call->peer) { size_t len = sizeof(call->dest_srx); memcpy(msg->msg_name, &call->dest_srx, len); msg->msg_namelen = len; } ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len, flags, &copied); if (ret == -EAGAIN) ret = 0; if (ret == -EIO) goto call_failed; if (ret < 0) goto error_unlock_call; if (rxrpc_call_is_complete(call) && skb_queue_empty(&call->recvmsg_queue)) goto call_complete; if (rxrpc_call_has_failed(call)) goto call_failed; if (!skb_queue_empty(&call->recvmsg_queue)) rxrpc_notify_socket(call); goto not_yet_complete; call_failed: rxrpc_purge_queue(&call->recvmsg_queue); call_complete: ret = rxrpc_recvmsg_term(call, msg); if (ret < 0) goto error_unlock_call; if (!(flags & MSG_PEEK)) rxrpc_release_call(rx, call); msg->msg_flags |= MSG_EOR; ret = 1; not_yet_complete: if (ret == 0) msg->msg_flags |= MSG_MORE; else msg->msg_flags &= ~MSG_MORE; ret = copied; error_unlock_call: mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put_recvmsg); trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret); return ret; error_requeue_call: if (!(flags & MSG_PEEK)) { spin_lock_irq(&rx->recvmsg_lock); list_add(&call->recvmsg_link, &rx->recvmsg_q); spin_unlock_irq(&rx->recvmsg_lock); trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0); } else { rxrpc_put_call(call, rxrpc_call_put_recvmsg); } error_no_call: release_sock(&rx->sk); error_trace: trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret); return ret; wait_interrupted: ret = sock_intr_errno(timeo); wait_error: finish_wait(sk_sleep(&rx->sk), &wait); call = NULL; goto error_trace; } /** * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info * @sock: The socket that the call exists on * @call: The call to send data through * @iter: The buffer to receive into * @_len: The 
amount of data we want to receive (decreased on return) * @want_more: True if more data is expected to be read * @_abort: Where the abort code is stored if -ECONNABORTED is returned * @_service: Where to store the actual service ID (may be upgraded) * * Allow a kernel service to receive data and pick up information about the * state of a call. Note that *@_abort should also be initialised to %0. * * Note that we may return %-EAGAIN to drain empty packets at the end * of the data, even if we've already copied over the requested data. * * Return: %0 if got what was asked for and there's more available, %1 * if we got what was asked for and we're at the end of the data and * %-EAGAIN if we need more data. */ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, struct iov_iter *iter, size_t *_len, bool want_more, u32 *_abort, u16 *_service) { size_t offset = 0; int ret; _enter("{%d},%zu,%d", call->debug_id, *_len, want_more); mutex_lock(&call->user_mutex); ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset); *_len -= offset; if (ret == -EIO) goto call_failed; if (ret < 0) goto out; /* We can only reach here with a partially full buffer if we have * reached the end of the data. We must otherwise have a full buffer * or have been given -EAGAIN. */ if (ret == 1) { if (iov_iter_count(iter) > 0) goto short_data; if (!want_more) goto read_phase_complete; ret = 0; goto out; } if (!want_more) goto excess_data; goto out; read_phase_complete: ret = 1; out: if (_service) *_service = call->dest_srx.srx_service; mutex_unlock(&call->user_mutex); _leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort); return ret; short_data: trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_short_data, call->cid, call->call_id, call->rx_consumed, 0, -EBADMSG); ret = -EBADMSG; goto out; excess_data: trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_excess_data, call->cid, call->call_id, call->rx_consumed, 0, -EMSGSIZE); ret = -EMSGSIZE; goto out; call_failed: *_abort = call->abort_code; ret = call->error; if (call->completion == RXRPC_CALL_SUCCEEDED) { ret = 1; if (iov_iter_count(iter) > 0) ret = -ECONNRESET; } goto out; } EXPORT_SYMBOL(rxrpc_kernel_recv_data);
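For orientation, here is a minimal sketch of how a kernel service might drive rxrpc_kernel_recv_data() under the return-value contract documented above. The my_read_reply() helper, the socket/call setup, and the exact-size buffer are assumptions for illustration, not part of this file; note the code above returns -EBADMSG if the data ends while the buffer still has room, so the caller is assumed to know the reply length. A real caller would sleep until rxrpc signals the socket (via rxrpc_notify_socket()) rather than spin on -EAGAIN.

/*
 * Minimal sketch, assuming my_sock/my_call were set up elsewhere via the
 * AF_RXRPC kernel API and that "size" is the exact expected reply length.
 */
static int my_read_reply(struct socket *my_sock, struct rxrpc_call *my_call,
			 void *buf, size_t size)
{
	struct kvec kv = { .iov_base = buf, .iov_len = size };
	struct iov_iter iter;
	size_t len = size;
	u32 abort_code = 0;	/* kernel-doc above: initialise *_abort to 0 */
	int ret;

	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, size);
	for (;;) {
		/* 1: end of data; 0: filled, more follows; -EAGAIN: not yet */
		ret = rxrpc_kernel_recv_data(my_sock, my_call, &iter, &len,
					     false, &abort_code, NULL);
		if (ret != -EAGAIN)
			return ret;
		/* wait here for a socket notification before retrying */
	}
}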
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * ALSA sequencer Queue handling * Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl> */ #ifndef __SND_SEQ_QUEUE_H #define __SND_SEQ_QUEUE_H #include "seq_memory.h" #include "seq_prioq.h" #include "seq_timer.h" #include "seq_lock.h" #include <linux/interrupt.h> #include <linux/list.h> #include <linux/bitops.h> #define SEQ_QUEUE_NO_OWNER (-1) struct snd_seq_queue { int queue; /* queue number */ char name[64]; /* name of this queue */ struct snd_seq_prioq *tickq; /* midi tick event queue */ struct snd_seq_prioq *timeq; /* real-time event queue */ struct snd_seq_timer *timer; /* time keeper for this queue */ int owner; /* client that 'owns' the timer */ bool locked; /* timer is only accessible by owner if set */ bool klocked; /* kernel lock (after START) */ bool check_again; /* concurrent access happened during check */ bool check_blocked; /* queue being checked */ unsigned int flags; /* status flags */ unsigned int info_flags; /* info for sync */ spinlock_t owner_lock; spinlock_t check_lock; /* clients which use this queue (bitmap) */ DECLARE_BITMAP(clients_bitmap, SNDRV_SEQ_MAX_CLIENTS); unsigned int clients; /* users of this queue */ struct mutex timer_mutex; snd_use_lock_t use_lock; }; /* get the number of current queues */ int snd_seq_queue_get_cur_queues(void); /* delete queues */ void snd_seq_queues_delete(void); /* create new queue (constructor) */ struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags); /* delete queue (destructor) */ int snd_seq_queue_delete(int client, int queueid); /* final stage */ void snd_seq_queue_client_leave(int client); /* enqueue an event received from one of the clients */ int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop); /* Remove events */ void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info); /* return pointer to queue structure for specified id */ struct snd_seq_queue *queueptr(int queueid); /* unlock */ #define queuefree(q) snd_use_lock_free(&(q)->use_lock) DEFINE_FREE(snd_seq_queue, struct snd_seq_queue *, if (!IS_ERR_OR_NULL(_T)) queuefree(_T)) /* return the (first) queue matching with the specified name */ struct snd_seq_queue *snd_seq_queue_find_name(char *name); /* check single queue and dispatch events */ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop); /* access to queue's parameters */ int snd_seq_queue_check_access(int queueid, int client); int snd_seq_queue_timer_set_tempo(int queueid, int client, struct snd_seq_queue_tempo *info); int snd_seq_queue_set_owner(int queueid, int client, int locked); int snd_seq_queue_timer_open(int queueid); int snd_seq_queue_timer_close(int queueid); int snd_seq_queue_use(int queueid, int client, int use); int snd_seq_queue_is_used(int queueid, int client); int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop); #endif
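A short usage sketch for the lookup/unlock pairing declared above; my_get_queue_owner() is a hypothetical caller, not part of the ALSA code. queueptr() returns a queue with its use-lock held, which must be dropped again with queuefree():

/* Hypothetical caller illustrating the queueptr()/queuefree() pairing. */
static int my_get_queue_owner(int queueid)
{
	struct snd_seq_queue *q;
	int owner;

	q = queueptr(queueid);	/* takes a use-lock reference, or NULL */
	if (!q)
		return -EINVAL;
	owner = q->owner;	/* touch fields only while the reference is held */
	queuefree(q);		/* drop the use-lock reference */
	return owner;
}

The DEFINE_FREE() helper above exists so that callers can instead declare struct snd_seq_queue *q __free(snd_seq_queue) = queueptr(id); and have the release run automatically at scope exit.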
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2003-2008 Takahiro Hirofuchi * Copyright (C) 2015-2016 Samsung Electronics * Krzysztof Opasiak <k.opasiak@samsung.com> */ #include
<asm/byteorder.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <net/sock.h> #include "usbip_common.h" #define DRIVER_AUTHOR "Takahiro Hirofuchi <hirofuchi@users.sourceforge.net>" #define DRIVER_DESC "USB/IP Core" #ifdef CONFIG_USBIP_DEBUG unsigned long usbip_debug_flag = 0xffffffff; #else unsigned long usbip_debug_flag; #endif EXPORT_SYMBOL_GPL(usbip_debug_flag); module_param(usbip_debug_flag, ulong, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(usbip_debug_flag, "debug flags (defined in usbip_common.h)"); /* FIXME */ struct device_attribute dev_attr_usbip_debug; EXPORT_SYMBOL_GPL(dev_attr_usbip_debug); static ssize_t usbip_debug_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lx\n", usbip_debug_flag); } static ssize_t usbip_debug_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { if (sscanf(buf, "%lx", &usbip_debug_flag) != 1) return -EINVAL; return count; } DEVICE_ATTR_RW(usbip_debug); static void usbip_dump_buffer(char *buff, int bufflen) { print_hex_dump(KERN_DEBUG, "usbip-core", DUMP_PREFIX_OFFSET, 16, 4, buff, bufflen, false); } static void usbip_dump_pipe(unsigned int p) { unsigned char type = usb_pipetype(p); unsigned char ep = usb_pipeendpoint(p); unsigned char dev = usb_pipedevice(p); unsigned char dir = usb_pipein(p); pr_debug("dev(%d) ep(%d) [%s] ", dev, ep, dir ? "IN" : "OUT"); switch (type) { case PIPE_ISOCHRONOUS: pr_debug("ISO\n"); break; case PIPE_INTERRUPT: pr_debug("INT\n"); break; case PIPE_CONTROL: pr_debug("CTRL\n"); break; case PIPE_BULK: pr_debug("BULK\n"); break; default: pr_debug("ERR\n"); break; } } static void usbip_dump_usb_device(struct usb_device *udev) { struct device *dev = &udev->dev; int i; dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", udev->devnum, udev->devpath, usb_speed_string(udev->speed)); pr_debug("tt hub ttport %d\n", udev->ttport); dev_dbg(dev, " "); for (i = 0; i < 16; i++) pr_debug(" %2u", i); pr_debug("\n"); dev_dbg(dev, " toggle0(IN) :"); for (i = 0; i < 16; i++) pr_debug(" %2u", (udev->toggle[0] & (1 << i)) ? 1 : 0); pr_debug("\n"); dev_dbg(dev, " toggle1(OUT):"); for (i = 0; i < 16; i++) pr_debug(" %2u", (udev->toggle[1] & (1 << i)) ? 
1 : 0); pr_debug("\n"); dev_dbg(dev, " epmaxp_in :"); for (i = 0; i < 16; i++) { if (udev->ep_in[i]) pr_debug(" %2u", le16_to_cpu(udev->ep_in[i]->desc.wMaxPacketSize)); } pr_debug("\n"); dev_dbg(dev, " epmaxp_out :"); for (i = 0; i < 16; i++) { if (udev->ep_out[i]) pr_debug(" %2u", le16_to_cpu(udev->ep_out[i]->desc.wMaxPacketSize)); } pr_debug("\n"); dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev), udev->bus->bus_name); dev_dbg(dev, "have_langid %d, string_langid %d\n", udev->have_langid, udev->string_langid); dev_dbg(dev, "maxchild %d\n", udev->maxchild); } static void usbip_dump_request_type(__u8 rt) { switch (rt & USB_RECIP_MASK) { case USB_RECIP_DEVICE: pr_debug("DEVICE"); break; case USB_RECIP_INTERFACE: pr_debug("INTERF"); break; case USB_RECIP_ENDPOINT: pr_debug("ENDPOI"); break; case USB_RECIP_OTHER: pr_debug("OTHER "); break; default: pr_debug("------"); break; } } static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd) { if (!cmd) { pr_debug(" : null pointer\n"); return; } pr_debug(" "); pr_debug("bRequestType(%02X) bRequest(%02X) wValue(%04X) wIndex(%04X) wLength(%04X) ", cmd->bRequestType, cmd->bRequest, cmd->wValue, cmd->wIndex, cmd->wLength); pr_debug("\n "); if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { pr_debug("STANDARD "); switch (cmd->bRequest) { case USB_REQ_GET_STATUS: pr_debug("GET_STATUS\n"); break; case USB_REQ_CLEAR_FEATURE: pr_debug("CLEAR_FEAT\n"); break; case USB_REQ_SET_FEATURE: pr_debug("SET_FEAT\n"); break; case USB_REQ_SET_ADDRESS: pr_debug("SET_ADDRRS\n"); break; case USB_REQ_GET_DESCRIPTOR: pr_debug("GET_DESCRI\n"); break; case USB_REQ_SET_DESCRIPTOR: pr_debug("SET_DESCRI\n"); break; case USB_REQ_GET_CONFIGURATION: pr_debug("GET_CONFIG\n"); break; case USB_REQ_SET_CONFIGURATION: pr_debug("SET_CONFIG\n"); break; case USB_REQ_GET_INTERFACE: pr_debug("GET_INTERF\n"); break; case USB_REQ_SET_INTERFACE: pr_debug("SET_INTERF\n"); break; case USB_REQ_SYNCH_FRAME: pr_debug("SYNC_FRAME\n"); break; default: pr_debug("REQ(%02X)\n", cmd->bRequest); break; } usbip_dump_request_type(cmd->bRequestType); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { pr_debug("CLASS\n"); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { pr_debug("VENDOR\n"); } else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_RESERVED) { pr_debug("RESERVED\n"); } } void usbip_dump_urb(struct urb *urb) { struct device *dev; if (!urb) { pr_debug("urb: null pointer!!\n"); return; } if (!urb->dev) { pr_debug("urb->dev: null pointer!!\n"); return; } dev = &urb->dev->dev; usbip_dump_usb_device(urb->dev); dev_dbg(dev, " pipe :%08x ", urb->pipe); usbip_dump_pipe(urb->pipe); dev_dbg(dev, " status :%d\n", urb->status); dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); dev_dbg(dev, " transfer_buffer_length:%d\n", urb->transfer_buffer_length); dev_dbg(dev, " actual_length :%d\n", urb->actual_length); if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) usbip_dump_usb_ctrlrequest( (struct usb_ctrlrequest *)urb->setup_packet); dev_dbg(dev, " start_frame :%d\n", urb->start_frame); dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); dev_dbg(dev, " interval :%d\n", urb->interval); dev_dbg(dev, " error_count :%d\n", urb->error_count); } EXPORT_SYMBOL_GPL(usbip_dump_urb); void usbip_dump_header(struct usbip_header *pdu) { pr_debug("BASE: cmd %u seq %u devid %u dir %u ep %u\n", pdu->base.command, pdu->base.seqnum, pdu->base.devid, pdu->base.direction, pdu->base.ep); switch (pdu->base.command) { 
case USBIP_CMD_SUBMIT: pr_debug("USBIP_CMD_SUBMIT: x_flags %u x_len %u sf %u #p %d iv %d\n", pdu->u.cmd_submit.transfer_flags, pdu->u.cmd_submit.transfer_buffer_length, pdu->u.cmd_submit.start_frame, pdu->u.cmd_submit.number_of_packets, pdu->u.cmd_submit.interval); break; case USBIP_CMD_UNLINK: pr_debug("USBIP_CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum); break; case USBIP_RET_SUBMIT: pr_debug("USBIP_RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n", pdu->u.ret_submit.status, pdu->u.ret_submit.actual_length, pdu->u.ret_submit.start_frame, pdu->u.ret_submit.number_of_packets, pdu->u.ret_submit.error_count); break; case USBIP_RET_UNLINK: pr_debug("USBIP_RET_UNLINK: status %d\n", pdu->u.ret_unlink.status); break; default: /* NOT REACHED */ pr_err("unknown command\n"); break; } } EXPORT_SYMBOL_GPL(usbip_dump_header); /* Receive data over TCP/IP. */ int usbip_recv(struct socket *sock, void *buf, int size) { int result; struct kvec iov = {.iov_base = buf, .iov_len = size}; struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; int total = 0; if (!sock || !buf || !size) return -EINVAL; iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, size); usbip_dbg_xmit("enter\n"); do { sock->sk->sk_allocation = GFP_NOIO; sock->sk->sk_use_task_frag = false; result = sock_recvmsg(sock, &msg, MSG_WAITALL); if (result <= 0) goto err; total += result; } while (msg_data_left(&msg)); if (usbip_dbg_flag_xmit) { pr_debug("receiving....\n"); usbip_dump_buffer(buf, size); pr_debug("received, osize %d ret %d size %zd total %d\n", size, result, msg_data_left(&msg), total); } return total; err: return result; } EXPORT_SYMBOL_GPL(usbip_recv); /* there may be more cases to tweak the flags. */ static unsigned int tweak_transfer_flags(unsigned int flags) { flags &= ~URB_NO_TRANSFER_DMA_MAP; return flags; } /* * The USBIP driver packs URB transfer flags into PDUs that are exchanged * between Server (usbip_host) and Client (vhci_hcd). URB_* flags * are internal to the kernel and may change, whereas the USBIP_URB_* flags * exchanged in PDUs are part of the USBIP user API and must not change. * * USBIP_URB_* flags are exported as an explicit API, and client and server * map kernel flags to/from USBIP_URB_*. Details as follows: * * Client tx path (USBIP_CMD_SUBMIT): * - Maps URB_* to USBIP_URB_* when it sends a USBIP_CMD_SUBMIT packet. * * Server rx path (USBIP_CMD_SUBMIT): * - Maps USBIP_URB_* to URB_* when it receives a USBIP_CMD_SUBMIT packet. * * Flags aren't included in USBIP_CMD_UNLINK and USBIP_RET_SUBMIT packets * and no special handling is needed for them in the following cases: * - Server rx path (USBIP_CMD_UNLINK) * - Client rx path & Server tx path (USBIP_RET_SUBMIT) * * Code paths: * usbip_pack_pdu() is the common routine that packs a pdu from an urb * and unpacks a pdu into an urb. * * usbip_pack_cmd_submit() and usbip_pack_ret_submit() handle * USBIP_CMD_SUBMIT and USBIP_RET_SUBMIT respectively. * * urb_to_usbip() and usbip_to_urb() are used * by usbip_pack_cmd_submit() and usbip_pack_ret_submit() to map * flags.
*/ struct urb_to_usbip_flags { u32 urb_flag; u32 usbip_flag; }; #define NUM_USBIP_FLAGS 17 static const struct urb_to_usbip_flags flag_map[NUM_USBIP_FLAGS] = { {URB_SHORT_NOT_OK, USBIP_URB_SHORT_NOT_OK}, {URB_ISO_ASAP, USBIP_URB_ISO_ASAP}, {URB_NO_TRANSFER_DMA_MAP, USBIP_URB_NO_TRANSFER_DMA_MAP}, {URB_ZERO_PACKET, USBIP_URB_ZERO_PACKET}, {URB_NO_INTERRUPT, USBIP_URB_NO_INTERRUPT}, {URB_FREE_BUFFER, USBIP_URB_FREE_BUFFER}, {URB_DIR_IN, USBIP_URB_DIR_IN}, {URB_DIR_OUT, USBIP_URB_DIR_OUT}, {URB_DIR_MASK, USBIP_URB_DIR_MASK}, {URB_DMA_MAP_SINGLE, USBIP_URB_DMA_MAP_SINGLE}, {URB_DMA_MAP_PAGE, USBIP_URB_DMA_MAP_PAGE}, {URB_DMA_MAP_SG, USBIP_URB_DMA_MAP_SG}, {URB_MAP_LOCAL, USBIP_URB_MAP_LOCAL}, {URB_SETUP_MAP_SINGLE, USBIP_URB_SETUP_MAP_SINGLE}, {URB_SETUP_MAP_LOCAL, USBIP_URB_SETUP_MAP_LOCAL}, {URB_DMA_SG_COMBINED, USBIP_URB_DMA_SG_COMBINED}, {URB_ALIGNED_TEMP_BUFFER, USBIP_URB_ALIGNED_TEMP_BUFFER}, }; static unsigned int urb_to_usbip(unsigned int flags) { unsigned int map_flags = 0; int loop; for (loop = 0; loop < NUM_USBIP_FLAGS; loop++) { if (flags & flag_map[loop].urb_flag) map_flags |= flag_map[loop].usbip_flag; } return map_flags; } static unsigned int usbip_to_urb(unsigned int flags) { unsigned int map_flags = 0; int loop; for (loop = 0; loop < NUM_USBIP_FLAGS; loop++) { if (flags & flag_map[loop].usbip_flag) map_flags |= flag_map[loop].urb_flag; } return map_flags; } static void usbip_pack_cmd_submit(struct usbip_header *pdu, struct urb *urb, int pack) { struct usbip_header_cmd_submit *spdu = &pdu->u.cmd_submit; /* * Some members are not still implemented in usbip. I hope this issue * will be discussed when usbip is ported to other operating systems. */ if (pack) { /* map after tweaking the urb flags */ spdu->transfer_flags = urb_to_usbip(tweak_transfer_flags(urb->transfer_flags)); spdu->transfer_buffer_length = urb->transfer_buffer_length; spdu->start_frame = urb->start_frame; spdu->number_of_packets = urb->number_of_packets; spdu->interval = urb->interval; } else { urb->transfer_flags = usbip_to_urb(spdu->transfer_flags); urb->transfer_buffer_length = spdu->transfer_buffer_length; urb->start_frame = spdu->start_frame; urb->number_of_packets = spdu->number_of_packets; urb->interval = spdu->interval; } } static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, int pack) { struct usbip_header_ret_submit *rpdu = &pdu->u.ret_submit; if (pack) { rpdu->status = urb->status; rpdu->actual_length = urb->actual_length; rpdu->start_frame = urb->start_frame; rpdu->number_of_packets = urb->number_of_packets; rpdu->error_count = urb->error_count; } else { urb->status = rpdu->status; urb->actual_length = rpdu->actual_length; urb->start_frame = rpdu->start_frame; urb->number_of_packets = rpdu->number_of_packets; urb->error_count = rpdu->error_count; } } void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd, int pack) { switch (cmd) { case USBIP_CMD_SUBMIT: usbip_pack_cmd_submit(pdu, urb, pack); break; case USBIP_RET_SUBMIT: usbip_pack_ret_submit(pdu, urb, pack); break; default: /* NOT REACHED */ pr_err("unknown command\n"); break; } } EXPORT_SYMBOL_GPL(usbip_pack_pdu); static void correct_endian_basic(struct usbip_header_basic *base, int send) { if (send) { base->command = cpu_to_be32(base->command); base->seqnum = cpu_to_be32(base->seqnum); base->devid = cpu_to_be32(base->devid); base->direction = cpu_to_be32(base->direction); base->ep = cpu_to_be32(base->ep); } else { base->command = be32_to_cpu(base->command); base->seqnum = be32_to_cpu(base->seqnum); 
base->devid = be32_to_cpu(base->devid); base->direction = be32_to_cpu(base->direction); base->ep = be32_to_cpu(base->ep); } } static void correct_endian_cmd_submit(struct usbip_header_cmd_submit *pdu, int send) { if (send) { pdu->transfer_flags = cpu_to_be32(pdu->transfer_flags); cpu_to_be32s(&pdu->transfer_buffer_length); cpu_to_be32s(&pdu->start_frame); cpu_to_be32s(&pdu->number_of_packets); cpu_to_be32s(&pdu->interval); } else { pdu->transfer_flags = be32_to_cpu(pdu->transfer_flags); be32_to_cpus(&pdu->transfer_buffer_length); be32_to_cpus(&pdu->start_frame); be32_to_cpus(&pdu->number_of_packets); be32_to_cpus(&pdu->interval); } } static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu, int send) { if (send) { cpu_to_be32s(&pdu->status); cpu_to_be32s(&pdu->actual_length); cpu_to_be32s(&pdu->start_frame); cpu_to_be32s(&pdu->number_of_packets); cpu_to_be32s(&pdu->error_count); } else { be32_to_cpus(&pdu->status); be32_to_cpus(&pdu->actual_length); be32_to_cpus(&pdu->start_frame); be32_to_cpus(&pdu->number_of_packets); be32_to_cpus(&pdu->error_count); } } static void correct_endian_cmd_unlink(struct usbip_header_cmd_unlink *pdu, int send) { if (send) pdu->seqnum = cpu_to_be32(pdu->seqnum); else pdu->seqnum = be32_to_cpu(pdu->seqnum); } static void correct_endian_ret_unlink(struct usbip_header_ret_unlink *pdu, int send) { if (send) cpu_to_be32s(&pdu->status); else be32_to_cpus(&pdu->status); } void usbip_header_correct_endian(struct usbip_header *pdu, int send) { __u32 cmd = 0; if (send) cmd = pdu->base.command; correct_endian_basic(&pdu->base, send); if (!send) cmd = pdu->base.command; switch (cmd) { case USBIP_CMD_SUBMIT: correct_endian_cmd_submit(&pdu->u.cmd_submit, send); break; case USBIP_RET_SUBMIT: correct_endian_ret_submit(&pdu->u.ret_submit, send); break; case USBIP_CMD_UNLINK: correct_endian_cmd_unlink(&pdu->u.cmd_unlink, send); break; case USBIP_RET_UNLINK: correct_endian_ret_unlink(&pdu->u.ret_unlink, send); break; default: /* NOT REACHED */ pr_err("unknown command\n"); break; } } EXPORT_SYMBOL_GPL(usbip_header_correct_endian); static void usbip_iso_packet_correct_endian( struct usbip_iso_packet_descriptor *iso, int send) { /* does not need all members. but copy all simply. 
*/ if (send) { iso->offset = cpu_to_be32(iso->offset); iso->length = cpu_to_be32(iso->length); iso->status = cpu_to_be32(iso->status); iso->actual_length = cpu_to_be32(iso->actual_length); } else { iso->offset = be32_to_cpu(iso->offset); iso->length = be32_to_cpu(iso->length); iso->status = be32_to_cpu(iso->status); iso->actual_length = be32_to_cpu(iso->actual_length); } } static void usbip_pack_iso(struct usbip_iso_packet_descriptor *iso, struct usb_iso_packet_descriptor *uiso, int pack) { if (pack) { iso->offset = uiso->offset; iso->length = uiso->length; iso->status = uiso->status; iso->actual_length = uiso->actual_length; } else { uiso->offset = iso->offset; uiso->length = iso->length; uiso->status = iso->status; uiso->actual_length = iso->actual_length; } } /* must free buffer */ struct usbip_iso_packet_descriptor* usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen) { struct usbip_iso_packet_descriptor *iso; int np = urb->number_of_packets; ssize_t size = np * sizeof(*iso); int i; iso = kzalloc(size, GFP_KERNEL); if (!iso) return NULL; for (i = 0; i < np; i++) { usbip_pack_iso(&iso[i], &urb->iso_frame_desc[i], 1); usbip_iso_packet_correct_endian(&iso[i], 1); } *bufflen = size; return iso; } EXPORT_SYMBOL_GPL(usbip_alloc_iso_desc_pdu); /* some members of urb must be substituted before. */ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) { void *buff; struct usbip_iso_packet_descriptor *iso; int np = urb->number_of_packets; int size = np * sizeof(*iso); int i; int ret; int total_length = 0; if (!usb_pipeisoc(urb->pipe)) return 0; /* my Bluetooth dongle gets ISO URBs which are np = 0 */ if (np == 0) return 0; buff = kzalloc(size, GFP_KERNEL); if (!buff) return -ENOMEM; ret = usbip_recv(ud->tcp_socket, buff, size); if (ret != size) { dev_err(&urb->dev->dev, "recv iso_frame_descriptor, %d\n", ret); kfree(buff); if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); else usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } iso = (struct usbip_iso_packet_descriptor *) buff; for (i = 0; i < np; i++) { usbip_iso_packet_correct_endian(&iso[i], 0); usbip_pack_iso(&iso[i], &urb->iso_frame_desc[i], 0); total_length += urb->iso_frame_desc[i].actual_length; } kfree(buff); if (total_length != urb->actual_length) { dev_err(&urb->dev->dev, "total length of iso packets %d not equal to actual length of buffer %d\n", total_length, urb->actual_length); if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); else usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } return ret; } EXPORT_SYMBOL_GPL(usbip_recv_iso); /* * This function restores the padding which was removed to optimize * bandwidth during transfer over tcp/ip * * The buffer and iso packets need to be stored in the urb, in proper * endianness, before calling this function */ void usbip_pad_iso(struct usbip_device *ud, struct urb *urb) { int np = urb->number_of_packets; int i; int actualoffset = urb->actual_length; if (!usb_pipeisoc(urb->pipe)) return; /* if no packets or length of data is 0, then nothing to unpack */ if (np == 0 || urb->actual_length == 0) return; /* * if actual_length is transfer_buffer_length then no padding is * present.
*/ if (urb->actual_length == urb->transfer_buffer_length) return; /* * loop over all packets from last to first (to prevent overwriting * memory when padding) and move them into the proper place */ for (i = np-1; i > 0; i--) { actualoffset -= urb->iso_frame_desc[i].actual_length; memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset, urb->transfer_buffer + actualoffset, urb->iso_frame_desc[i].actual_length); } } EXPORT_SYMBOL_GPL(usbip_pad_iso); /* some members of urb must be substituted before. */ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) { struct scatterlist *sg; int ret = 0; int recv; int size; int copy; int i; if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) { /* the direction of urb must be OUT. */ if (usb_pipein(urb->pipe)) return 0; size = urb->transfer_buffer_length; } else { /* the direction of urb must be IN. */ if (usb_pipeout(urb->pipe)) return 0; size = urb->actual_length; } /* no need to recv xbuff */ if (!(size > 0)) return 0; if (size > urb->transfer_buffer_length) /* should not happen, probably malicious packet */ goto error; if (urb->num_sgs) { copy = size; for_each_sg(urb->sg, sg, urb->num_sgs, i) { int recv_size; if (copy < sg->length) recv_size = copy; else recv_size = sg->length; recv = usbip_recv(ud->tcp_socket, sg_virt(sg), recv_size); if (recv != recv_size) goto error; copy -= recv; ret += recv; if (!copy) break; } if (ret != size) goto error; } else { ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); if (ret != size) goto error; } return ret; error: dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); else usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return -EPIPE; } EXPORT_SYMBOL_GPL(usbip_recv_xbuff); static int __init usbip_core_init(void) { return usbip_init_eh(); } static void __exit usbip_core_exit(void) { usbip_finish_eh(); return; } module_init(usbip_core_init); module_exit(usbip_core_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
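To make the pack/endianness split concrete, here is a hedged transmit-side sketch combining usbip_pack_pdu() and usbip_header_correct_endian(); my_prepare_cmd_submit() and the abridged base-header setup are illustrative assumptions, not code from this driver:

/* Sketch: build a USBIP_CMD_SUBMIT header from an URB for transmission. */
static void my_prepare_cmd_submit(struct usbip_header *pdu, struct urb *urb,
				  __u32 seqnum)
{
	memset(pdu, 0, sizeof(*pdu));
	pdu->base.command = USBIP_CMD_SUBMIT;
	pdu->base.seqnum = seqnum;
	/* devid, direction and ep setup omitted for brevity */

	/* pack = 1: urb -> pdu, mapping URB_* flags to USBIP_URB_* */
	usbip_pack_pdu(pdu, urb, USBIP_CMD_SUBMIT, 1);
	/* send = 1: byte-swap every header field to network (big) endian */
	usbip_header_correct_endian(pdu, 1);
}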
// SPDX-License-Identifier: GPL-2.0-only /* * Yama Linux Security Module * * Author: Kees Cook <keescook@chromium.org> * * Copyright (C) 2010 Canonical, Ltd. * Copyright (C) 2011 The Chromium OS Authors.
*/ #include <linux/lsm_hooks.h> #include <linux/sysctl.h> #include <linux/ptrace.h> #include <linux/prctl.h> #include <linux/ratelimit.h> #include <linux/workqueue.h> #include <linux/string_helpers.h> #include <linux/task_work.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <uapi/linux/lsm.h> #define YAMA_SCOPE_DISABLED 0 #define YAMA_SCOPE_RELATIONAL 1 #define YAMA_SCOPE_CAPABILITY 2 #define YAMA_SCOPE_NO_ATTACH 3 static int ptrace_scope = YAMA_SCOPE_RELATIONAL; /* describe a ptrace relationship for potential exception */ struct ptrace_relation { struct task_struct *tracer; struct task_struct *tracee; bool invalid; struct list_head node; struct rcu_head rcu; }; static LIST_HEAD(ptracer_relations); static DEFINE_SPINLOCK(ptracer_relations_lock); static void yama_relation_cleanup(struct work_struct *work); static DECLARE_WORK(yama_relation_work, yama_relation_cleanup); struct access_report_info { struct callback_head work; const char *access; struct task_struct *target; struct task_struct *agent; }; static void __report_access(struct callback_head *work) { struct access_report_info *info = container_of(work, struct access_report_info, work); char *target_cmd, *agent_cmd; target_cmd = kstrdup_quotable_cmdline(info->target, GFP_KERNEL); agent_cmd = kstrdup_quotable_cmdline(info->agent, GFP_KERNEL); pr_notice_ratelimited( "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n", info->access, target_cmd, info->target->pid, agent_cmd, info->agent->pid); kfree(agent_cmd); kfree(target_cmd); put_task_struct(info->agent); put_task_struct(info->target); kfree(info); } /* defers execution because cmdline access can sleep */ static void report_access(const char *access, struct task_struct *target, struct task_struct *agent) { struct access_report_info *info; assert_spin_locked(&target->alloc_lock); /* for target->comm */ if (current->flags & PF_KTHREAD) { /* I don't think kthreads call task_work_run() before exiting. * Imagine angry ranting about procfs here. */ pr_notice_ratelimited( "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n", access, target->comm, target->pid, agent->comm, agent->pid); return; } info = kmalloc(sizeof(*info), GFP_ATOMIC); if (!info) return; init_task_work(&info->work, __report_access); get_task_struct(target); get_task_struct(agent); info->access = access; info->target = target; info->agent = agent; if (task_work_add(current, &info->work, TWA_RESUME) == 0) return; /* success */ WARN(1, "report_access called from exiting task"); put_task_struct(target); put_task_struct(agent); kfree(info); } /** * yama_relation_cleanup - remove invalid entries from the relation list * @work: unused * */ static void yama_relation_cleanup(struct work_struct *work) { struct ptrace_relation *relation; spin_lock(&ptracer_relations_lock); rcu_read_lock(); list_for_each_entry_rcu(relation, &ptracer_relations, node) { if (relation->invalid) { list_del_rcu(&relation->node); kfree_rcu(relation, rcu); } } rcu_read_unlock(); spin_unlock(&ptracer_relations_lock); } /** * yama_ptracer_add - add/replace an exception for this tracer/tracee pair * @tracer: the task_struct of the process doing the ptrace * @tracee: the task_struct of the process to be ptraced * * Each tracee can have, at most, one tracer registered. Each time this * is called, the prior registered tracer will be replaced for the tracee. * * Returns 0 if relationship was added, -ve on error. 
*/ static int yama_ptracer_add(struct task_struct *tracer, struct task_struct *tracee) { struct ptrace_relation *relation, *added; added = kmalloc(sizeof(*added), GFP_KERNEL); if (!added) return -ENOMEM; added->tracee = tracee; added->tracer = tracer; added->invalid = false; spin_lock(&ptracer_relations_lock); rcu_read_lock(); list_for_each_entry_rcu(relation, &ptracer_relations, node) { if (relation->invalid) continue; if (relation->tracee == tracee) { list_replace_rcu(&relation->node, &added->node); kfree_rcu(relation, rcu); goto out; } } list_add_rcu(&added->node, &ptracer_relations); out: rcu_read_unlock(); spin_unlock(&ptracer_relations_lock); return 0; } /** * yama_ptracer_del - remove exceptions related to the given tasks * @tracer: remove any relation where tracer task matches * @tracee: remove any relation where tracee task matches */ static void yama_ptracer_del(struct task_struct *tracer, struct task_struct *tracee) { struct ptrace_relation *relation; bool marked = false; rcu_read_lock(); list_for_each_entry_rcu(relation, &ptracer_relations, node) { if (relation->invalid) continue; if (relation->tracee == tracee || (tracer && relation->tracer == tracer)) { relation->invalid = true; marked = true; } } rcu_read_unlock(); if (marked) schedule_work(&yama_relation_work); } /** * yama_task_free - check for task_pid to remove from exception list * @task: task being removed */ static void yama_task_free(struct task_struct *task) { yama_ptracer_del(task, task); } /** * yama_task_prctl - check for Yama-specific prctl operations * @option: operation * @arg2: argument * @arg3: argument * @arg4: argument * @arg5: argument * * Return 0 on success, -ve on error. -ENOSYS is returned when Yama * does not handle the given option. */ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { int rc = -ENOSYS; struct task_struct *myself; switch (option) { case PR_SET_PTRACER: /* Since a thread can call prctl(), find the group leader * before calling _add() or _del() on it, since we want * process-level granularity of control. The tracer group * leader checking is handled later when walking the ancestry * at the time of PTRACE_ATTACH check. */ myself = current->group_leader; if (arg2 == 0) { yama_ptracer_del(NULL, myself); rc = 0; } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) { rc = yama_ptracer_add(NULL, myself); } else { struct task_struct *tracer; tracer = find_get_task_by_vpid(arg2); if (!tracer) { rc = -EINVAL; } else { rc = yama_ptracer_add(tracer, myself); put_task_struct(tracer); } } break; } return rc; } /** * task_is_descendant - walk up a process family tree looking for a match * @parent: the process to compare against while walking up from child * @child: the process to start from while looking upwards for parent * * Returns 1 if child is a descendant of parent, 0 if not. 
*/ static int task_is_descendant(struct task_struct *parent, struct task_struct *child) { int rc = 0; struct task_struct *walker = child; if (!parent || !child) return 0; rcu_read_lock(); if (!thread_group_leader(parent)) parent = rcu_dereference(parent->group_leader); while (walker->pid > 0) { if (!thread_group_leader(walker)) walker = rcu_dereference(walker->group_leader); if (walker == parent) { rc = 1; break; } walker = rcu_dereference(walker->real_parent); } rcu_read_unlock(); return rc; } /** * ptracer_exception_found - tracer registered as exception for this tracee * @tracer: the task_struct of the process attempting ptrace * @tracee: the task_struct of the process to be ptraced * * Returns 1 if tracer has a ptracer exception ancestor for tracee. */ static int ptracer_exception_found(struct task_struct *tracer, struct task_struct *tracee) { int rc = 0; struct ptrace_relation *relation; struct task_struct *parent = NULL; bool found = false; rcu_read_lock(); /* * If there's already an active tracing relationship, then make an * exception for the sake of other accesses, like process_vm_rw(). */ parent = ptrace_parent(tracee); if (parent != NULL && same_thread_group(parent, tracer)) { rc = 1; goto unlock; } /* Look for a PR_SET_PTRACER relationship. */ if (!thread_group_leader(tracee)) tracee = rcu_dereference(tracee->group_leader); list_for_each_entry_rcu(relation, &ptracer_relations, node) { if (relation->invalid) continue; if (relation->tracee == tracee) { parent = relation->tracer; found = true; break; } } if (found && (parent == NULL || task_is_descendant(parent, tracer))) rc = 1; unlock: rcu_read_unlock(); return rc; } /** * yama_ptrace_access_check - validate PTRACE_ATTACH calls * @child: task that current task is attempting to ptrace * @mode: ptrace attach mode * * Returns 0 if following the ptrace is allowed, -ve on error. */ static int yama_ptrace_access_check(struct task_struct *child, unsigned int mode) { int rc = 0; /* require ptrace target be a child of ptracer on attach */ if (mode & PTRACE_MODE_ATTACH) { switch (ptrace_scope) { case YAMA_SCOPE_DISABLED: /* No additional restrictions. */ break; case YAMA_SCOPE_RELATIONAL: rcu_read_lock(); if (!pid_alive(child)) rc = -EPERM; if (!rc && !task_is_descendant(current, child) && !ptracer_exception_found(current, child) && !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) rc = -EPERM; rcu_read_unlock(); break; case YAMA_SCOPE_CAPABILITY: rcu_read_lock(); if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) rc = -EPERM; rcu_read_unlock(); break; case YAMA_SCOPE_NO_ATTACH: default: rc = -EPERM; break; } } if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) report_access("attach", child, current); return rc; } /** * yama_ptrace_traceme - validate PTRACE_TRACEME calls * @parent: task that will become the ptracer of the current task * * Returns 0 if following the ptrace is allowed, -ve on error. */ static int yama_ptrace_traceme(struct task_struct *parent) { int rc = 0; /* Only disallow PTRACE_TRACEME on more aggressive settings. 
*/ switch (ptrace_scope) { case YAMA_SCOPE_CAPABILITY: if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE)) rc = -EPERM; break; case YAMA_SCOPE_NO_ATTACH: rc = -EPERM; break; } if (rc) { task_lock(current); report_access("traceme", current, parent); task_unlock(current); } return rc; } static const struct lsm_id yama_lsmid = { .name = "yama", .id = LSM_ID_YAMA, }; static struct security_hook_list yama_hooks[] __ro_after_init = { LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check), LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme), LSM_HOOK_INIT(task_prctl, yama_task_prctl), LSM_HOOK_INIT(task_free, yama_task_free), }; #ifdef CONFIG_SYSCTL static int yama_dointvec_minmax(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table table_copy; if (write && !capable(CAP_SYS_PTRACE)) return -EPERM; /* Lock the max value if it ever gets set. */ table_copy = *table; if (*(int *)table_copy.data == *(int *)table_copy.extra2) table_copy.extra1 = table_copy.extra2; return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos); } static int max_scope = YAMA_SCOPE_NO_ATTACH; static const struct ctl_table yama_sysctl_table[] = { { .procname = "ptrace_scope", .data = &ptrace_scope, .maxlen = sizeof(int), .mode = 0644, .proc_handler = yama_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = &max_scope, }, }; static void __init yama_init_sysctl(void) { if (!register_sysctl("kernel/yama", yama_sysctl_table)) panic("Yama: sysctl registration failed.\n"); } #else static inline void yama_init_sysctl(void) { } #endif /* CONFIG_SYSCTL */ static int __init yama_init(void) { pr_info("Yama: becoming mindful.\n"); security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks), &yama_lsmid); yama_init_sysctl(); return 0; } DEFINE_LSM(yama) = { .id = &yama_lsmid, .init = yama_init, };
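The prctl() interface handled by yama_task_prctl() above is reachable from ordinary userspace; a minimal sketch (the allow_tracer() wrapper is hypothetical) for systems running with ptrace_scope = 1:

#include <sys/types.h>
#include <sys/prctl.h>

/* Whitelist one process as our ptracer under Yama's relational scope. */
int allow_tracer(pid_t debugger)
{
	/* arg2 = 0 clears the exception; PR_SET_PTRACER_ANY permits any tracer */
	return prctl(PR_SET_PTRACER, debugger, 0, 0, 0);
}

As the hook shows, the exception is recorded against the caller's thread-group leader, so the whitelist is per process, not per thread.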
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Eric Leblond <eric@regit.org> * * Development of this code partly funded by OISF * (http://www.openinfosecfoundation.org/) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/jhash.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_queue.h> static u32 jhash_initval __read_mostly; struct nft_queue { u8 sreg_qnum; u16 queuenum; u16 queues_total; u16 flags; }; static void nft_queue_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_queue *priv = nft_expr_priv(expr); u32 queue = priv->queuenum; u32 ret; if (priv->queues_total > 1) { if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) { int cpu = raw_smp_processor_id(); queue = priv->queuenum + cpu % priv->queues_total; } else { queue = nfqueue_hash(pkt->skb, queue, priv->queues_total, nft_pf(pkt), jhash_initval); } } ret = NF_QUEUE_NR(queue); if (priv->flags & NFT_QUEUE_FLAG_BYPASS) ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; regs->verdict.code = ret; } static void nft_queue_sreg_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_queue *priv = nft_expr_priv(expr); u32 queue, ret; queue = regs->data[priv->sreg_qnum]; ret = NF_QUEUE_NR(queue); if (priv->flags & NFT_QUEUE_FLAG_BYPASS) ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; regs->verdict.code = ret; } static int nft_queue_validate(const struct nft_ctx *ctx, const struct nft_expr *expr) { static const unsigned int supported_hooks = ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_POST_ROUTING)); switch (ctx->family) { case NFPROTO_IPV4: case NFPROTO_IPV6: case NFPROTO_INET: case NFPROTO_BRIDGE: break; case NFPROTO_NETDEV: /* lacks okfn */ fallthrough; default: return -EOPNOTSUPP; } return nft_chain_validate_hooks(ctx->chain, supported_hooks); } static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = { [NFTA_QUEUE_NUM] = { .type = NLA_U16 }, [NFTA_QUEUE_TOTAL] = { .type = NLA_U16 }, [NFTA_QUEUE_FLAGS] = { .type = NLA_U16 }, [NFTA_QUEUE_SREG_QNUM] = { .type = NLA_U32 }, }; static int nft_queue_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_queue *priv = nft_expr_priv(expr); u32 maxid; priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM])); if (tb[NFTA_QUEUE_TOTAL]) priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL])); else
priv->queues_total = 1; if (priv->queues_total == 0) return -EINVAL; maxid = priv->queues_total - 1 + priv->queuenum; if (maxid > U16_MAX) return -ERANGE; if (tb[NFTA_QUEUE_FLAGS]) { priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS])); if (priv->flags & ~NFT_QUEUE_FLAG_MASK) return -EINVAL; } return 0; } static int nft_queue_sreg_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_queue *priv = nft_expr_priv(expr); int err; err = nft_parse_register_load(ctx, tb[NFTA_QUEUE_SREG_QNUM], &priv->sreg_qnum, sizeof(u32)); if (err < 0) return err; if (tb[NFTA_QUEUE_FLAGS]) { priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS])); if (priv->flags & ~NFT_QUEUE_FLAG_MASK) return -EINVAL; if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) return -EOPNOTSUPP; } return 0; } static int nft_queue_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_queue *priv = nft_expr_priv(expr); if (nla_put_be16(skb, NFTA_QUEUE_NUM, htons(priv->queuenum)) || nla_put_be16(skb, NFTA_QUEUE_TOTAL, htons(priv->queues_total)) || nla_put_be16(skb, NFTA_QUEUE_FLAGS, htons(priv->flags))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int nft_queue_sreg_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_queue *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_QUEUE_SREG_QNUM, priv->sreg_qnum) || nla_put_be16(skb, NFTA_QUEUE_FLAGS, htons(priv->flags))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static struct nft_expr_type nft_queue_type; static const struct nft_expr_ops nft_queue_ops = { .type = &nft_queue_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_queue)), .eval = nft_queue_eval, .init = nft_queue_init, .dump = nft_queue_dump, .validate = nft_queue_validate, .reduce = NFT_REDUCE_READONLY, }; static const struct nft_expr_ops nft_queue_sreg_ops = { .type = &nft_queue_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_queue)), .eval = nft_queue_sreg_eval, .init = nft_queue_sreg_init, .dump = nft_queue_sreg_dump, .validate = nft_queue_validate, .reduce = NFT_REDUCE_READONLY, }; static const struct nft_expr_ops * nft_queue_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { if (tb[NFTA_QUEUE_NUM] && tb[NFTA_QUEUE_SREG_QNUM]) return ERR_PTR(-EINVAL); init_hashrandom(&jhash_initval); if (tb[NFTA_QUEUE_NUM]) return &nft_queue_ops; if (tb[NFTA_QUEUE_SREG_QNUM]) return &nft_queue_sreg_ops; return ERR_PTR(-EINVAL); } static struct nft_expr_type nft_queue_type __read_mostly = { .name = "queue", .select_ops = nft_queue_select_ops, .policy = nft_queue_policy, .maxattr = NFTA_QUEUE_MAX, .owner = THIS_MODULE, }; static int __init nft_queue_module_init(void) { return nft_register_expr(&nft_queue_type); } static void __exit nft_queue_module_exit(void) { nft_unregister_expr(&nft_queue_type); } module_init(nft_queue_module_init); module_exit(nft_queue_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Eric Leblond <eric@regit.org>"); MODULE_ALIAS_NFT_EXPR("queue"); MODULE_DESCRIPTION("Netfilter nftables queue module");
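A standalone worked example of the CPU-fanout arithmetic in nft_queue_eval() above; the values are illustrative. With queuenum = 4 and queues_total = 3, packets spread over queues 4-6 keyed by the processing CPU:

#include <stdio.h>

/* Userspace illustration of: queue = queuenum + cpu % queues_total */
int main(void)
{
	unsigned int queuenum = 4, queues_total = 3, cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> queue %u\n", cpu,
		       queuenum + cpu % queues_total);
	return 0;
}

nft_queue_init() bounds this at configuration time: queuenum + queues_total - 1 must not exceed U16_MAX, otherwise it returns -ERANGE.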
// SPDX-License-Identifier: GPL-2.0-only
/*
 * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle
 *
 * Written 2013 by Werner Almesberger <werner@almesberger.net>
 *
 * Copyright (c) 2015 - 2016 Stefan Schmidt <stefan@datenfreihafen.org>
 *
 * Based on at86rf230.c and spi_atusb.c.
 * at86rf230.c is
 * Copyright (C) 2009 Siemens AG
 * Written by: Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
 *
 * spi_atusb.c is
 * Copyright (c) 2011 Richard Sharpe <realrichardsharpe@gmail.com>
 * Copyright (c) 2011 Stefan Schmidt <stefan@datenfreihafen.org>
 * Copyright (c) 2011 Werner Almesberger <werner@almesberger.net>
 *
 * USB initialization is
 * Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com>
 *
 * Busware HUL support is
 * Copyright (c) 2017 Josef Filzmaier <j.filzmaier@gmx.at>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/usb.h>
#include <linux/skbuff.h>

#include <net/cfg802154.h>
#include <net/mac802154.h>

#include "at86rf230.h"
#include "atusb.h"

#define ATUSB_JEDEC_ATMEL	0x1f	/* JEDEC manufacturer ID */

#define ATUSB_NUM_RX_URBS	4	/* allow for a bit of local latency */
#define ATUSB_ALLOC_DELAY_MS	100	/* delay after failed allocation */
#define ATUSB_TX_TIMEOUT_MS	200	/* on the air timeout */

struct atusb {
	struct ieee802154_hw *hw;
	struct usb_device *usb_dev;
	struct atusb_chip_data *data;
	int shutdown;			/* non-zero if shutting down */
	int err;			/* set by first error */

	/* RX variables */
	struct delayed_work work;	/* memory allocations */
	struct usb_anchor idle_urbs;	/* URBs waiting to be submitted */
	struct usb_anchor rx_urbs;	/* URBs waiting for reception */

	/* TX variables */
	struct usb_ctrlrequest tx_dr;
	struct urb *tx_urb;
	struct sk_buff *tx_skb;
	u8 tx_ack_seq;			/* current TX ACK sequence number */

	/* Firmware variable */
	unsigned char fw_ver_maj;	/* Firmware major version number */
	unsigned char fw_ver_min;	/* Firmware minor version number */
	unsigned char fw_hw_type;	/* Firmware hardware type */
};

struct atusb_chip_data {
	u16 t_channel_switch;
	int rssi_base_val;

	int (*set_channel)(struct ieee802154_hw*, u8, u8);
	int (*set_txpower)(struct ieee802154_hw*, s32);
};

static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
			      u8 shift, u8 value)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	u8 orig, tmp;
	int ret = 0;

	dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);

	ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
				   0, reg, &orig, 1, 1000, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* Write the value only into that part of the register which is allowed
	 * by the mask. All other bits stay as before.
	 */
	tmp = orig & ~mask;
	tmp |= (value << shift) & mask;

	if (tmp != orig)
		ret = usb_control_msg_send(usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
					   tmp, reg, NULL, 0, 1000, GFP_KERNEL);

	return ret;
}

static int atusb_read_subreg(struct atusb *lp,
			     unsigned int addr, unsigned int mask,
			     unsigned int shift)
{
	int reg, ret;

	ret = usb_control_msg_recv(lp->usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
				   0, addr, &reg, 1, 1000, GFP_KERNEL);
	if (ret < 0)
		return ret;

	reg = (reg & mask) >> shift;

	return reg;
}

static int atusb_get_and_clear_error(struct atusb *atusb)
{
	int err = atusb->err;

	atusb->err = 0;
	return err;
}

/* ----- skb allocation ---------------------------------------------------- */

#define MAX_PSDU	127
#define MAX_RX_XFER	(1 + MAX_PSDU + 2 + 1)	/* PHR+PSDU+CRC+LQI */

#define SKB_ATUSB(skb)	(*(struct atusb **)(skb)->cb)

static void atusb_in(struct urb *urb);

static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	struct sk_buff *skb = urb->context;
	int ret;

	if (!skb) {
		skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
		if (!skb) {
			dev_warn_ratelimited(&usb_dev->dev,
					     "atusb_in: can't allocate skb\n");
			return -ENOMEM;
		}
		skb_put(skb, MAX_RX_XFER);
		SKB_ATUSB(skb) = atusb;
	}

	usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1),
			  skb->data, MAX_RX_XFER, atusb_in, skb);
	usb_anchor_urb(urb, &atusb->rx_urbs);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		usb_unanchor_urb(urb);
		kfree_skb(skb);
		urb->context = NULL;
	}
	return ret;
}

static void atusb_work_urbs(struct work_struct *work)
{
	struct atusb *atusb =
	    container_of(to_delayed_work(work), struct atusb, work);
	struct usb_device *usb_dev = atusb->usb_dev;
	struct urb *urb;
	int ret;

	if (atusb->shutdown)
		return;

	do {
		urb = usb_get_from_anchor(&atusb->idle_urbs);
		if (!urb)
			return;
		ret = atusb_submit_rx_urb(atusb, urb);
	} while (!ret);

	usb_anchor_urb(urb, &atusb->idle_urbs);
	dev_warn_ratelimited(&usb_dev->dev,
			     "atusb_in: can't allocate/submit URB (%d)\n", ret);
	schedule_delayed_work(&atusb->work,
			      msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1);
}

/* ----- Asynchronous USB -------------------------------------------------- */

static void atusb_tx_done(struct atusb *atusb, u8 seq, int reason)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	u8 expect = atusb->tx_ack_seq;

	dev_dbg(&usb_dev->dev, "%s (0x%02x/0x%02x)\n", __func__, seq, expect);
	if (seq == expect) {
		/* TODO check for ifs handling in firmware */
		if (reason == IEEE802154_SUCCESS)
			ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
		else
			ieee802154_xmit_error(atusb->hw, atusb->tx_skb, reason);
	} else {
		/* TODO I experience this case when atusb has a tx complete
		 * irq before probing, we should fix the firmware it's an
		 * unlikely case now that seq == expect is then true, but can
		 * happen and fail with a tx_skb = NULL;
		 */
		ieee802154_xmit_hw_error(atusb->hw, atusb->tx_skb);
	}
}

static void atusb_in_good(struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct sk_buff *skb = urb->context;
	struct atusb *atusb = SKB_ATUSB(skb);
	int result = IEEE802154_SUCCESS;
	u8 len, lqi, trac;

	if (!urb->actual_length) {
		dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n");
		return;
	}

	len = *skb->data;

	switch (urb->actual_length) {
	case 2:
		trac = TRAC_MASK(*(skb->data + 1));
		switch (trac) {
		case TRAC_SUCCESS:
		case TRAC_SUCCESS_DATA_PENDING:
			/* already IEEE802154_SUCCESS */
			break;
		case TRAC_CHANNEL_ACCESS_FAILURE:
			result = IEEE802154_CHANNEL_ACCESS_FAILURE;
			break;
		case TRAC_NO_ACK:
			result = IEEE802154_NO_ACK;
			break;
		default:
			result = IEEE802154_SYSTEM_ERROR;
		}

		fallthrough;
	case 1:
		atusb_tx_done(atusb, len, result);
		return;
	}

	if (len + 1 > urb->actual_length - 1) {
		dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n",
			len, urb->actual_length);
		return;
	}

	if (!ieee802154_is_valid_psdu_len(len)) {
		dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n");
		return;
	}

	lqi = skb->data[len + 1];
	dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi);
	skb_pull(skb, 1);	/* remove PHR */
	skb_trim(skb, len);	/* get payload only */
	ieee802154_rx_irqsafe(atusb->hw, skb, lqi);
	urb->context = NULL;	/* skb is gone */
}

static void atusb_in(struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct sk_buff *skb = urb->context;
	struct atusb *atusb = SKB_ATUSB(skb);

	dev_dbg(&usb_dev->dev, "%s: status %d len %d\n", __func__,
		urb->status, urb->actual_length);
	if (urb->status) {
		if (urb->status == -ENOENT) { /* being killed */
			kfree_skb(skb);
			urb->context = NULL;
			return;
		}
		dev_dbg(&usb_dev->dev, "%s: URB error %d\n", __func__, urb->status);
	} else {
		atusb_in_good(urb);
	}

	usb_anchor_urb(urb, &atusb->idle_urbs);
	if (!atusb->shutdown)
		schedule_delayed_work(&atusb->work, 0);
}

/* ----- URB allocation/deallocation --------------------------------------- */

static void atusb_free_urbs(struct atusb *atusb)
{
	struct urb *urb;

	while (1) {
		urb = usb_get_from_anchor(&atusb->idle_urbs);
		if (!urb)
			break;
		kfree_skb(urb->context);
		usb_free_urb(urb);
	}
}

static int atusb_alloc_urbs(struct atusb *atusb, int n)
{
	struct urb *urb;

	while (n) {
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			atusb_free_urbs(atusb);
			return -ENOMEM;
		}
		usb_anchor_urb(urb, &atusb->idle_urbs);
		usb_free_urb(urb);
		n--;
	}
	return 0;
}

/* ----- IEEE 802.15.4 interface operations -------------------------------- */

static void atusb_xmit_complete(struct urb *urb)
{
	dev_dbg(&urb->dev->dev, "atusb_xmit urb completed");
}

static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	struct atusb *atusb = hw->priv;
	struct usb_device *usb_dev = atusb->usb_dev;
	int ret;

	dev_dbg(&usb_dev->dev, "%s (%d)\n", __func__, skb->len);
	atusb->tx_skb = skb;
	atusb->tx_ack_seq++;
	atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq);
	atusb->tx_dr.wLength = cpu_to_le16(skb->len);

	usb_fill_control_urb(atusb->tx_urb, usb_dev,
			     usb_sndctrlpipe(usb_dev, 0),
			     (unsigned char *)&atusb->tx_dr, skb->data,
			     skb->len, atusb_xmit_complete, NULL);
	ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC);
	dev_dbg(&usb_dev->dev, "%s done (%d)\n", __func__, ret);
	return ret;
}

static int atusb_ed(struct ieee802154_hw *hw, u8 *level)
{
	WARN_ON(!level);
	*level = 0xbe;
	return 0;
}

static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
				  struct ieee802154_hw_addr_filt *filt,
				  unsigned long changed)
{
	struct atusb *atusb = hw->priv;
	struct device *dev = &atusb->usb_dev->dev;

	if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
		u16 addr = le16_to_cpu(filt->short_addr);

		dev_vdbg(dev, "%s called for saddr\n", __func__);
		usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
				     addr, RG_SHORT_ADDR_0, NULL, 0, 1000, GFP_KERNEL);
		usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
				     addr >> 8, RG_SHORT_ADDR_1, NULL, 0, 1000, GFP_KERNEL);
	}

	if (changed & IEEE802154_AFILT_PANID_CHANGED) {
		u16 pan = le16_to_cpu(filt->pan_id);

		dev_vdbg(dev, "%s called for pan id\n", __func__);
		usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
				     pan, RG_PAN_ID_0, NULL, 0, 1000, GFP_KERNEL);
		usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
				     pan >> 8, RG_PAN_ID_1, NULL, 0, 1000, GFP_KERNEL);
	}

	if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
		u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN];

		memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
		dev_vdbg(dev, "%s called for IEEE addr\n", __func__);
		for (i = 0; i < 8; i++)
			usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
					     addr[i], RG_IEEE_ADDR_0 + i, NULL, 0,
					     1000, GFP_KERNEL);
	}

	if (changed & IEEE802154_AFILT_PANC_CHANGED) {
		dev_vdbg(dev, "%s called for panc change\n", __func__);
		if (filt->pan_coord)
			atusb_write_subreg(atusb, SR_AACK_I_AM_COORD, 1);
		else
			atusb_write_subreg(atusb, SR_AACK_I_AM_COORD, 0);
	}

	return atusb_get_and_clear_error(atusb);
}

static int atusb_start(struct ieee802154_hw *hw)
{
	struct atusb *atusb = hw->priv;
	struct usb_device *usb_dev = atusb->usb_dev;
	int ret;

	dev_dbg(&usb_dev->dev, "%s\n", __func__);
	schedule_delayed_work(&atusb->work, 0);
	usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 1, 0,
			     NULL, 0, 1000, GFP_KERNEL);
	ret = atusb_get_and_clear_error(atusb);
	if (ret < 0)
		usb_kill_anchored_urbs(&atusb->idle_urbs);
	return ret;
}

static void atusb_stop(struct ieee802154_hw *hw)
{
	struct atusb *atusb = hw->priv;
	struct usb_device *usb_dev = atusb->usb_dev;

	dev_dbg(&usb_dev->dev, "%s\n", __func__);
	usb_kill_anchored_urbs(&atusb->idle_urbs);
	usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RX_MODE, ATUSB_REQ_TO_DEV, 0, 0,
			     NULL, 0, 1000, GFP_KERNEL);
	atusb_get_and_clear_error(atusb);
}

#define ATUSB_MAX_TX_POWERS 0xF
static const s32 atusb_powers[ATUSB_MAX_TX_POWERS + 1] = {
	300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700,
	-900, -1200, -1700,
};

static int
atusb_txpower(struct ieee802154_hw *hw, s32 mbm)
{
	struct atusb *atusb = hw->priv;

	if (atusb->data)
		return atusb->data->set_txpower(hw, mbm);
	else
		return -ENOTSUPP;
}

static int
atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
{
	struct atusb *atusb = hw->priv;
	u32 i;

	for (i = 0; i < hw->phy->supported.tx_powers_size; i++) {
		if (hw->phy->supported.tx_powers[i] == mbm)
			return atusb_write_subreg(atusb, SR_TX_PWR_23X, i);
	}

	return -EINVAL;
}

static int
hulusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
{
	u32 i;

	for (i = 0; i < hw->phy->supported.tx_powers_size; i++) {
		if (hw->phy->supported.tx_powers[i] == mbm)
			return atusb_write_subreg(hw->priv, SR_TX_PWR_212, i);
	}

	return -EINVAL;
}

#define ATUSB_MAX_ED_LEVELS 0xF
static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = {
	-9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
	-7100, -6900, -6700, -6500, -6300, -6100,
};

#define AT86RF212_MAX_TX_POWERS 0x1F
static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = {
	500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
	-800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
	-1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
};

#define AT86RF2XX_MAX_ED_LEVELS 0xF
static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = {
	-10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400,
	-8200, -8000, -7800, -7600, -7400, -7200, -7000,
};

static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = {
	-9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200,
	-8000, -7800, -7600, -7400, -7200, -7000, -6800,
};

static int
atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
{
	struct atusb *atusb = hw->priv;
	u8 val;

	/* mapping 802.15.4 to driver spec */
	switch (cca->mode) {
	case NL802154_CCA_ENERGY:
		val = 1;
		break;
	case NL802154_CCA_CARRIER:
		val = 2;
		break;
	case NL802154_CCA_ENERGY_CARRIER:
		switch (cca->opt) {
		case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
			val = 3;
			break;
		case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
			val = 0;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return atusb_write_subreg(atusb, SR_CCA_MODE, val);
}

static int hulusb_set_cca_ed_level(struct atusb *lp, int rssi_base_val)
{
	int cca_ed_thres;

	cca_ed_thres = atusb_read_subreg(lp, SR_CCA_ED_THRES);
	if (cca_ed_thres < 0)
		return cca_ed_thres;

	switch (rssi_base_val) {
	case -98:
		lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98;
		lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98);
		lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres];
		break;
	case -100:
		lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
		lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
		lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres];
		break;
	default:
		WARN_ON(1);
	}

	return 0;
}

static int
atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
	struct atusb *atusb = hw->priv;
	u32 i;

	for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
		if (hw->phy->supported.cca_ed_levels[i] == mbm)
			return atusb_write_subreg(atusb, SR_CCA_ED_THRES, i);
	}

	return -EINVAL;
}

static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	struct atusb *atusb = hw->priv;
	int ret = -ENOTSUPP;

	if (atusb->data) {
		ret = atusb->data->set_channel(hw, page, channel);
		/* @@@ ugly synchronization */
		msleep(atusb->data->t_channel_switch);
	}

	return ret;
}

static int atusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	struct atusb *atusb = hw->priv;
	int ret;

	ret = atusb_write_subreg(atusb, SR_CHANNEL, channel);
	if (ret < 0)
		return ret;
	return 0;
}

static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	int rc;
	int rssi_base_val;

	struct atusb *lp = hw->priv;

	if (channel == 0)
		rc = atusb_write_subreg(lp, SR_SUB_MODE, 0);
	else
		rc = atusb_write_subreg(lp, SR_SUB_MODE, 1);
	if (rc < 0)
		return rc;

	if (page == 0) {
		rc = atusb_write_subreg(lp, SR_BPSK_QPSK, 0);
		rssi_base_val = -100;
	} else {
		rc = atusb_write_subreg(lp, SR_BPSK_QPSK, 1);
		rssi_base_val = -98;
	}
	if (rc < 0)
		return rc;

	rc = hulusb_set_cca_ed_level(lp, rssi_base_val);
	if (rc < 0)
		return rc;

	return atusb_write_subreg(lp, SR_CHANNEL, channel);
}

static int
atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries)
{
	struct atusb *atusb = hw->priv;
	int ret;

	ret = atusb_write_subreg(atusb, SR_MIN_BE, min_be);
	if (ret)
		return ret;

	ret = atusb_write_subreg(atusb, SR_MAX_BE, max_be);
	if (ret)
		return ret;

	return atusb_write_subreg(atusb, SR_MAX_CSMA_RETRIES, retries);
}

static int
hulusb_set_lbt(struct ieee802154_hw *hw, bool on)
{
	struct atusb *atusb = hw->priv;

	return atusb_write_subreg(atusb, SR_CSMA_LBT_MODE, on);
}

static int
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
	struct atusb *atusb = hw->priv;

	return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
}

static int
atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
	struct atusb *atusb = hw->priv;
	int ret;

	if (on) {
		ret = atusb_write_subreg(atusb, SR_AACK_DIS_ACK, 1);
		if (ret < 0)
			return ret;

		ret = atusb_write_subreg(atusb, SR_AACK_PROM_MODE, 1);
		if (ret < 0)
			return ret;
	} else {
		ret = atusb_write_subreg(atusb, SR_AACK_PROM_MODE, 0);
		if (ret < 0)
			return ret;

		ret = atusb_write_subreg(atusb, SR_AACK_DIS_ACK, 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static struct atusb_chip_data atusb_chip_data = {
	.t_channel_switch = 1,
	.rssi_base_val = -91,
	.set_txpower = atusb_set_txpower,
	.set_channel = atusb_set_channel,
};

static struct atusb_chip_data hulusb_chip_data = {
	.t_channel_switch = 11,
	.rssi_base_val = -100,
	.set_txpower = hulusb_set_txpower,
	.set_channel = hulusb_set_channel,
};

static const struct ieee802154_ops atusb_ops = {
	.owner			= THIS_MODULE,
	.xmit_async		= atusb_xmit,
	.ed			= atusb_ed,
	.set_channel		= atusb_channel,
	.start			= atusb_start,
	.stop			= atusb_stop,
	.set_hw_addr_filt	= atusb_set_hw_addr_filt,
	.set_txpower		= atusb_txpower,
	.set_lbt		= hulusb_set_lbt,
	.set_cca_mode		= atusb_set_cca_mode,
	.set_cca_ed_level	= atusb_set_cca_ed_level,
	.set_csma_params	= atusb_set_csma_params,
	.set_frame_retries	= atusb_set_frame_retries,
	.set_promiscuous_mode	= atusb_set_promiscuous_mode,
};

/* ----- Firmware and chip version information ----------------------------- */

static int atusb_get_and_show_revision(struct atusb *atusb)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	char *hw_name;
	unsigned char buffer[3];
	int ret;

	/* Get a couple of the ATMega Firmware values */
	ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
				   buffer, 3, 1000, GFP_KERNEL);
	if (!ret) {
		atusb->fw_ver_maj = buffer[0];
		atusb->fw_ver_min = buffer[1];
		atusb->fw_hw_type = buffer[2];

		switch (atusb->fw_hw_type) {
		case ATUSB_HW_TYPE_100813:
		case ATUSB_HW_TYPE_101216:
		case ATUSB_HW_TYPE_110131:
			hw_name = "ATUSB";
			atusb->data = &atusb_chip_data;
			break;
		case ATUSB_HW_TYPE_RZUSB:
			hw_name = "RZUSB";
			atusb->data = &atusb_chip_data;
			break;
		case ATUSB_HW_TYPE_HULUSB:
			hw_name = "HULUSB";
			atusb->data = &hulusb_chip_data;
			break;
		default:
			hw_name = "UNKNOWN";
			atusb->err = -ENOTSUPP;
			ret = -ENOTSUPP;
			break;
		}

		dev_info(&usb_dev->dev,
			 "Firmware: major: %u, minor: %u, hardware type: %s (%d)\n",
			 atusb->fw_ver_maj, atusb->fw_ver_min, hw_name,
			 atusb->fw_hw_type);
	}
	if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) {
		dev_info(&usb_dev->dev,
			 "Firmware version (%u.%u) predates our first public release.",
			 atusb->fw_ver_maj, atusb->fw_ver_min);
		dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
	}

	return ret;
}

static int atusb_get_and_show_build(struct atusb *atusb)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	char *build;
	int ret;

	build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
	if (!build)
		return -ENOMEM;

	ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
			      ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
	if (ret >= 0) {
		build[ret] = 0;
		dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
	}

	kfree(build);
	return ret;
}

static int atusb_get_and_conf_chip(struct atusb *atusb)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	u8 man_id_0, man_id_1, part_num, version_num;
	const char *chip;
	struct ieee802154_hw *hw = atusb->hw;
	int ret;

	ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
				   0, RG_MAN_ID_0, &man_id_0, 1, 1000, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
				   0, RG_MAN_ID_1, &man_id_1, 1, 1000, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
				   0, RG_PART_NUM, &part_num, 1, 1000, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ret = usb_control_msg_recv(usb_dev, 0, ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
				   0, RG_VERSION_NUM, &version_num, 1, 1000, GFP_KERNEL);
	if (ret < 0)
		return ret;

	hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
		    IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;

	hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
			 WPAN_PHY_FLAG_CCA_MODE;

	hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
				       BIT(NL802154_CCA_CARRIER) |
				       BIT(NL802154_CCA_ENERGY_CARRIER);
	hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
				      BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);

	hw->phy->cca.mode = NL802154_CCA_ENERGY;

	hw->phy->current_page = 0;

	if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
		dev_err(&usb_dev->dev,
			"non-Atmel transceiver xxxx%02x%02x\n",
			man_id_1, man_id_0);
		goto fail;
	}

	switch (part_num) {
	case 2:
		chip = "AT86RF230";
		atusb->hw->phy->supported.channels[0] = 0x7FFF800;
		atusb->hw->phy->current_channel = 11;	/* reset default */
		atusb->hw->phy->supported.tx_powers = atusb_powers;
		atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
		hw->phy->supported.cca_ed_levels = atusb_ed_levels;
		hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
		break;
	case 3:
		chip = "AT86RF231";
		atusb->hw->phy->supported.channels[0] = 0x7FFF800;
		atusb->hw->phy->current_channel = 11;	/* reset default */
		atusb->hw->phy->supported.tx_powers = atusb_powers;
		atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
		hw->phy->supported.cca_ed_levels = atusb_ed_levels;
		hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
		break;
	case 7:
		chip = "AT86RF212";
		atusb->hw->flags |= IEEE802154_HW_LBT;
		atusb->hw->phy->supported.channels[0] = 0x00007FF;
		atusb->hw->phy->supported.channels[2] = 0x00007FF;
		atusb->hw->phy->current_channel = 5;
		atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
		atusb->hw->phy->supported.tx_powers = at86rf212_powers;
		atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
		atusb->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
		atusb->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
		break;
	default:
		dev_err(&usb_dev->dev,
			"unexpected transceiver, part 0x%02x version 0x%02x\n",
			part_num, version_num);
		goto fail;
	}

	hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
	hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];

	dev_info(&usb_dev->dev, "ATUSB: %s version %d\n", chip, version_num);

	return 0;

fail:
	atusb->err = -ENODEV;
	return -ENODEV;
}

static int atusb_set_extended_addr(struct atusb *atusb)
{
	struct usb_device *usb_dev = atusb->usb_dev;
	unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
	__le64 extended_addr;
	u64 addr;
	int ret;

	/* Firmware versions before 0.3 do not support the EUI64_READ command.
	 * Just use a random address and be done.
	 */
	if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
		ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
		return 0;
	}

	/* Firmware is new enough so we fetch the address from EEPROM */
	ret = usb_control_msg_recv(atusb->usb_dev, 0, ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
				   buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000, GFP_KERNEL);
	if (ret < 0) {
		dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
		ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
		return ret;
	}

	memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
	/* Check if read address is not empty and the unicast bit is set correctly */
	if (!ieee802154_is_valid_extended_unicast_addr(extended_addr)) {
		dev_info(&usb_dev->dev, "no permanent extended address found, random address set\n");
		ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
	} else {
		atusb->hw->phy->perm_extended_addr = extended_addr;
		addr = swab64((__force u64)atusb->hw->phy->perm_extended_addr);
		dev_info(&usb_dev->dev, "Read permanent extended address %8phC from device\n",
			 &addr);
	}

	return ret;
}

/* ----- Setup ------------------------------------------------------------- */

static int atusb_probe(struct usb_interface *interface,
		       const struct usb_device_id *id)
{
	struct usb_device *usb_dev = interface_to_usbdev(interface);
	struct ieee802154_hw *hw;
	struct atusb *atusb = NULL;
	int ret = -ENOMEM;

	hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops);
	if (!hw)
		return -ENOMEM;

	atusb = hw->priv;
	atusb->hw = hw;
	atusb->usb_dev = usb_get_dev(usb_dev);
	usb_set_intfdata(interface, atusb);

	atusb->shutdown = 0;
	atusb->err = 0;
	INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs);
	init_usb_anchor(&atusb->idle_urbs);
	init_usb_anchor(&atusb->rx_urbs);

	if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS))
		goto fail;

	atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV;
	atusb->tx_dr.bRequest = ATUSB_TX;
	atusb->tx_dr.wValue = cpu_to_le16(0);

	atusb->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!atusb->tx_urb)
		goto fail;

	hw->parent = &usb_dev->dev;

	usb_control_msg_send(atusb->usb_dev, 0, ATUSB_RF_RESET, ATUSB_REQ_TO_DEV, 0, 0,
			     NULL, 0, 1000, GFP_KERNEL);
	atusb_get_and_conf_chip(atusb);
	atusb_get_and_show_revision(atusb);
	atusb_get_and_show_build(atusb);
	atusb_set_extended_addr(atusb);

	if ((atusb->fw_ver_maj == 0 && atusb->fw_ver_min >= 3) || atusb->fw_ver_maj > 0)
		hw->flags |= IEEE802154_HW_FRAME_RETRIES;

	ret = atusb_get_and_clear_error(atusb);
	if (ret) {
		dev_err(&atusb->usb_dev->dev,
			"%s: initialization failed, error = %d\n",
			__func__, ret);
		goto fail;
	}

	ret = ieee802154_register_hw(hw);
	if (ret)
		goto fail;

	/* If we just powered on, we're now in P_ON and need to enter TRX_OFF
	 * explicitly. Any resets after that will send us straight to TRX_OFF,
	 * making the command below redundant.
	 */
	usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
			     STATE_FORCE_TRX_OFF, RG_TRX_STATE, NULL, 0, 1000, GFP_KERNEL);

	msleep(1);	/* reset => TRX_OFF, tTR13 = 37 us */

#if 0
/* Calculating the maximum time available to empty the frame buffer
 * on reception:
 *
 * According to [1], the inter-frame gap is
 * R * 20 * 16 us + 128 us
 * where R is a random number from 0 to 7. Furthermore, we have 20 bit
 * times (80 us at 250 kbps) of SHR of the next frame before the
 * transceiver begins storing data in the frame buffer.
 *
 * This yields a minimum time of 208 us between the last data of a
 * frame and the first data of the next frame. This time is further
 * reduced by interrupt latency in the atusb firmware.
 *
 * atusb currently needs about 500 us to retrieve a maximum-sized
 * frame. We therefore have to allow reception of a new frame to begin
 * while we retrieve the previous frame.
 *
 * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based
 *     network", Jennic 2006.
 *     http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf
 */
	atusb_write_subreg(atusb, SR_RX_SAFE_MODE, 1);
#endif

	usb_control_msg_send(atusb->usb_dev, 0, ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
			     0xff, RG_IRQ_MASK, NULL, 0, 1000, GFP_KERNEL);

	ret = atusb_get_and_clear_error(atusb);
	if (!ret)
		return 0;

	dev_err(&atusb->usb_dev->dev,
		"%s: setup failed, error = %d\n",
		__func__, ret);

	ieee802154_unregister_hw(hw);
fail:
	atusb_free_urbs(atusb);
	usb_kill_urb(atusb->tx_urb);
	usb_free_urb(atusb->tx_urb);
	usb_put_dev(usb_dev);
	ieee802154_free_hw(hw);
	return ret;
}

static void atusb_disconnect(struct usb_interface *interface)
{
	struct atusb *atusb = usb_get_intfdata(interface);

	dev_dbg(&atusb->usb_dev->dev, "%s\n", __func__);

	atusb->shutdown = 1;
	cancel_delayed_work_sync(&atusb->work);

	usb_kill_anchored_urbs(&atusb->rx_urbs);
	atusb_free_urbs(atusb);
	usb_kill_urb(atusb->tx_urb);
	usb_free_urb(atusb->tx_urb);

	ieee802154_unregister_hw(atusb->hw);

	usb_put_dev(atusb->usb_dev);

	ieee802154_free_hw(atusb->hw);

	usb_set_intfdata(interface, NULL);

	pr_debug("%s done\n", __func__);
}

/* The devices we work with */
static const struct usb_device_id atusb_device_table[] = {
	{
		.match_flags		= USB_DEVICE_ID_MATCH_DEVICE |
					  USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor		= ATUSB_VENDOR_ID,
		.idProduct		= ATUSB_PRODUCT_ID,
		.bInterfaceClass	= USB_CLASS_VENDOR_SPEC
	},
	/* end with null element */
	{}
};
MODULE_DEVICE_TABLE(usb, atusb_device_table);

static struct usb_driver atusb_driver = {
	.name		= "atusb",
	.probe		= atusb_probe,
	.disconnect	= atusb_disconnect,
	.id_table	= atusb_device_table,
};
module_usb_driver(atusb_driver);

MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>");
MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>");
MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>");
MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>");
MODULE_AUTHOR("Josef Filzmaier <j.filzmaier@gmx.at>");
MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver");
MODULE_LICENSE("GPL");
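atusb_write_subreg() and atusb_read_subreg() above implement the usual masked read-modify-write for sub-register fields: read the whole register, clear the field selected by mask, insert the shifted new value, and write back only if something changed. A self-contained sketch of the same bit manipulation follows; it is pure arithmetic with no USB traffic, and the names are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Update only the field selected by mask/shift, leaving other bits intact. */
static uint8_t subreg_update(uint8_t reg_val, uint8_t mask, uint8_t shift,
			     uint8_t field)
{
	uint8_t tmp = reg_val & ~mask;		/* clear the field */

	tmp |= (field << shift) & mask;		/* insert the new value */
	return tmp;
}

static uint8_t subreg_read(uint8_t reg_val, uint8_t mask, uint8_t shift)
{
	return (reg_val & mask) >> shift;
}

int main(void)
{
	/* E.g. a 2-bit field at bits 5..6: mask 0x60, shift 5. */
	uint8_t reg = 0x9f;

	reg = subreg_update(reg, 0x60, 5, 2);	/* -> 0xdf */
	printf("0x%02x field=%u\n", reg, subreg_read(reg, 0x60, 5));
	return 0;
}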
904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2016 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> */ #include "devl_internal.h" struct devlink_sb { struct list_head list; unsigned int index; u32 size; u16 ingress_pools_count; u16 egress_pools_count; u16 ingress_tc_count; u16 egress_tc_count; }; static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb) { return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count; } static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink, unsigned int sb_index) { struct devlink_sb *devlink_sb; list_for_each_entry(devlink_sb, &devlink->sb_list, list) { if (devlink_sb->index == sb_index) return devlink_sb; } return NULL; } static bool devlink_sb_index_exists(struct devlink *devlink, unsigned int sb_index) { return devlink_sb_get_by_index(devlink, sb_index); } static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink, struct nlattr **attrs) { if (attrs[DEVLINK_ATTR_SB_INDEX]) { u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]); struct devlink_sb *devlink_sb; devlink_sb = devlink_sb_get_by_index(devlink, sb_index); if (!devlink_sb) return ERR_PTR(-ENODEV); return devlink_sb; } return ERR_PTR(-EINVAL); } static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink, struct genl_info *info) { return devlink_sb_get_from_attrs(devlink, info->attrs); } static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb, struct nlattr **attrs, u16 *p_pool_index) { u16 val; if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX]) return -EINVAL; val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]); if (val >= devlink_sb_pool_count(devlink_sb)) return -EINVAL; *p_pool_index = val; return 0; } static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb, struct genl_info *info, u16 *p_pool_index) { return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs, p_pool_index); } static int devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs, enum devlink_sb_pool_type *p_pool_type) { u8 val; if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE]) return -EINVAL; val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]); if (val != DEVLINK_SB_POOL_TYPE_INGRESS && val != DEVLINK_SB_POOL_TYPE_EGRESS) return -EINVAL; *p_pool_type = val; return 0; } static int devlink_sb_pool_type_get_from_info(struct genl_info *info, enum devlink_sb_pool_type *p_pool_type) { return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type); } static int devlink_sb_th_type_get_from_attrs(struct nlattr **attrs, enum devlink_sb_threshold_type *p_th_type) { u8 val; if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]) return -EINVAL; val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]); if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC && val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC) return -EINVAL; *p_th_type = val; return 0; } static int devlink_sb_th_type_get_from_info(struct genl_info *info, enum devlink_sb_threshold_type *p_th_type) { return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type); } static int devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb, struct nlattr **attrs, 
enum devlink_sb_pool_type pool_type, u16 *p_tc_index) { u16 val; if (!attrs[DEVLINK_ATTR_SB_TC_INDEX]) return -EINVAL; val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]); if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS && val >= devlink_sb->ingress_tc_count) return -EINVAL; if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS && val >= devlink_sb->egress_tc_count) return -EINVAL; *p_tc_index = val; return 0; } static int devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb, struct genl_info *info, enum devlink_sb_pool_type pool_type, u16 *p_tc_index) { return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs, pool_type, p_tc_index); } static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_sb *devlink_sb, enum devlink_command cmd, u32 portid, u32 seq, int flags) { void *hdr; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size)) goto nla_put_failure; if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT, devlink_sb->ingress_pools_count)) goto nla_put_failure; if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT, devlink_sb->egress_pools_count)) goto nla_put_failure; if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT, devlink_sb->ingress_tc_count)) goto nla_put_failure; if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT, devlink_sb->egress_tc_count)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } int devlink_nl_sb_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_sb *devlink_sb; struct sk_buff *msg; int err; devlink_sb = devlink_sb_get_from_info(devlink, info); if (IS_ERR(devlink_sb)) return PTR_ERR(devlink_sb); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_sb_fill(msg, devlink, devlink_sb, DEVLINK_CMD_SB_NEW, info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int devlink_nl_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_sb *devlink_sb; int idx = 0; int err = 0; list_for_each_entry(devlink_sb, &devlink->sb_list, list) { if (idx < state->idx) { idx++; continue; } err = devlink_nl_sb_fill(msg, devlink, devlink_sb, DEVLINK_CMD_SB_NEW, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags); if (err) { state->idx = idx; break; } idx++; } return err; } int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return devlink_nl_dumpit(skb, cb, devlink_nl_sb_get_dump_one); } static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_sb *devlink_sb, u16 pool_index, enum devlink_command cmd, u32 portid, u32 seq, int flags) { struct devlink_sb_pool_info pool_info; void *hdr; int err; err = devlink->ops->sb_pool_get(devlink, devlink_sb->index, pool_index, &pool_info); if (err) return err; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) goto nla_put_failure; if (nla_put_u16(msg, 
DEVLINK_ATTR_SB_POOL_INDEX, pool_index)) goto nla_put_failure; if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size)) goto nla_put_failure; if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE, pool_info.threshold_type)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_CELL_SIZE, pool_info.cell_size)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } int devlink_nl_sb_pool_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_sb *devlink_sb; struct sk_buff *msg; u16 pool_index; int err; devlink_sb = devlink_sb_get_from_info(devlink, info); if (IS_ERR(devlink_sb)) return PTR_ERR(devlink_sb); err = devlink_sb_pool_index_get_from_info(devlink_sb, info, &pool_index); if (err) return err; if (!devlink->ops->sb_pool_get) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index, DEVLINK_CMD_SB_POOL_NEW, info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, struct devlink *devlink, struct devlink_sb *devlink_sb, u32 portid, u32 seq, int flags) { u16 pool_count = devlink_sb_pool_count(devlink_sb); u16 pool_index; int err; for (pool_index = 0; pool_index < pool_count; pool_index++) { if (*p_idx < start) { (*p_idx)++; continue; } err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index, DEVLINK_CMD_SB_POOL_NEW, portid, seq, flags); if (err) return err; (*p_idx)++; } return 0; } static int devlink_nl_sb_pool_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_sb *devlink_sb; int err = 0; int idx = 0; if (!devlink->ops->sb_pool_get) return 0; list_for_each_entry(devlink_sb, &devlink->sb_list, list) { err = __sb_pool_get_dumpit(msg, state->idx, &idx, devlink, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags); if (err == -EOPNOTSUPP) { err = 0; } else if (err) { state->idx = idx; break; } } return err; } int devlink_nl_sb_pool_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return devlink_nl_dumpit(skb, cb, devlink_nl_sb_pool_get_dump_one); } static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index, u16 pool_index, u32 size, enum devlink_sb_threshold_type threshold_type, struct netlink_ext_ack *extack) { const struct devlink_ops *ops = devlink->ops; if (ops->sb_pool_set) return ops->sb_pool_set(devlink, sb_index, pool_index, size, threshold_type, extack); return -EOPNOTSUPP; } int devlink_nl_sb_pool_set_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; enum devlink_sb_threshold_type threshold_type; struct devlink_sb *devlink_sb; u16 pool_index; u32 size; int err; devlink_sb = devlink_sb_get_from_info(devlink, info); if (IS_ERR(devlink_sb)) return PTR_ERR(devlink_sb); err = devlink_sb_pool_index_get_from_info(devlink_sb, info, &pool_index); if (err) return err; err = devlink_sb_th_type_get_from_info(info, &threshold_type); if (err) return err; if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_POOL_SIZE)) return -EINVAL; size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]); return 
devlink_sb_pool_set(devlink, devlink_sb->index, pool_index, size, threshold_type, info->extack); } static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_port *devlink_port, struct devlink_sb *devlink_sb, u16 pool_index, enum devlink_command cmd, u32 portid, u32 seq, int flags) { const struct devlink_ops *ops = devlink->ops; u32 threshold; void *hdr; int err; err = ops->sb_port_pool_get(devlink_port, devlink_sb->index, pool_index, &threshold); if (err) return err; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index)) goto nla_put_failure; if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold)) goto nla_put_failure; if (ops->sb_occ_port_pool_get) { u32 cur; u32 max; err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index, pool_index, &cur, &max); if (err && err != -EOPNOTSUPP) goto sb_occ_get_failure; if (!err) { if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur)) goto nla_put_failure; if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max)) goto nla_put_failure; } } genlmsg_end(msg, hdr); return 0; nla_put_failure: err = -EMSGSIZE; sb_occ_get_failure: genlmsg_cancel(msg, hdr); return err; } int devlink_nl_sb_port_pool_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink_port *devlink_port = info->user_ptr[1]; struct devlink *devlink = devlink_port->devlink; struct devlink_sb *devlink_sb; struct sk_buff *msg; u16 pool_index; int err; devlink_sb = devlink_sb_get_from_info(devlink, info); if (IS_ERR(devlink_sb)) return PTR_ERR(devlink_sb); err = devlink_sb_pool_index_get_from_info(devlink_sb, info, &pool_index); if (err) return err; if (!devlink->ops->sb_port_pool_get) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port, devlink_sb, pool_index, DEVLINK_CMD_SB_PORT_POOL_NEW, info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx, struct devlink *devlink, struct devlink_sb *devlink_sb, u32 portid, u32 seq, int flags) { struct devlink_port *devlink_port; u16 pool_count = devlink_sb_pool_count(devlink_sb); unsigned long port_index; u16 pool_index; int err; xa_for_each(&devlink->ports, port_index, devlink_port) { for (pool_index = 0; pool_index < pool_count; pool_index++) { if (*p_idx < start) { (*p_idx)++; continue; } err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port, devlink_sb, pool_index, DEVLINK_CMD_SB_PORT_POOL_NEW, portid, seq, flags); if (err) return err; (*p_idx)++; } } return 0; } static int devlink_nl_sb_port_pool_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_sb *devlink_sb; int idx = 0; int err = 0; if (!devlink->ops->sb_port_pool_get) return 0; list_for_each_entry(devlink_sb, &devlink->sb_list, list) { err = __sb_port_pool_get_dumpit(msg, state->idx, &idx, devlink, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags); if (err == -EOPNOTSUPP) { err = 0; } else if (err) { state->idx = 
idx;
			break;
		}
	}

	return err;
}

int devlink_nl_sb_port_pool_get_dumpit(struct sk_buff *skb,
				       struct netlink_callback *cb)
{
	return devlink_nl_dumpit(skb, cb, devlink_nl_sb_port_pool_get_dump_one);
}

static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
				    unsigned int sb_index, u16 pool_index,
				    u32 threshold,
				    struct netlink_ext_ack *extack)
{
	const struct devlink_ops *ops = devlink_port->devlink->ops;

	if (ops->sb_port_pool_set)
		return ops->sb_port_pool_set(devlink_port, sb_index,
					     pool_index, threshold, extack);
	return -EOPNOTSUPP;
}

int devlink_nl_sb_port_pool_set_doit(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct devlink_port *devlink_port = info->user_ptr[1];
	struct devlink *devlink = info->user_ptr[0];
	struct devlink_sb *devlink_sb;
	u16 pool_index;
	u32 threshold;
	int err;

	devlink_sb = devlink_sb_get_from_info(devlink, info);
	if (IS_ERR(devlink_sb))
		return PTR_ERR(devlink_sb);

	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
						  &pool_index);
	if (err)
		return err;

	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
		return -EINVAL;

	threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
	return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
					pool_index, threshold, info->extack);
}

static int
devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
				struct devlink_port *devlink_port,
				struct devlink_sb *devlink_sb, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				enum devlink_command cmd,
				u32 portid, u32 seq, int flags)
{
	const struct devlink_ops *ops = devlink->ops;
	u16 pool_index;
	u32 threshold;
	void *hdr;
	int err;

	err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
				       tc_index, pool_type,
				       &pool_index, &threshold);
	if (err)
		return err;

	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	if (devlink_nl_put_handle(msg, devlink))
		goto nla_put_failure;
	if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
		goto nla_put_failure;
	if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
		goto nla_put_failure;
	if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
		goto nla_put_failure;
	if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
		goto nla_put_failure;
	if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
		goto nla_put_failure;
	if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
		goto nla_put_failure;

	if (ops->sb_occ_tc_port_bind_get) {
		u32 cur;
		u32 max;

		err = ops->sb_occ_tc_port_bind_get(devlink_port,
						   devlink_sb->index,
						   tc_index, pool_type,
						   &cur, &max);
		if (err && err != -EOPNOTSUPP)
			return err;
		if (!err) {
			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
				goto nla_put_failure;
			if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
				goto nla_put_failure;
		}
	}

	genlmsg_end(msg, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

int devlink_nl_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
					struct genl_info *info)
{
	struct devlink_port *devlink_port = info->user_ptr[1];
	struct devlink *devlink = devlink_port->devlink;
	struct devlink_sb *devlink_sb;
	struct sk_buff *msg;
	enum devlink_sb_pool_type pool_type;
	u16 tc_index;
	int err;

	devlink_sb = devlink_sb_get_from_info(devlink, info);
	if (IS_ERR(devlink_sb))
		return PTR_ERR(devlink_sb);

	err = devlink_sb_pool_type_get_from_info(info, &pool_type);
	if (err)
		return err;

	err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
						pool_type, &tc_index);
	if (err)
		return err;

	if (!devlink->ops->sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
					      devlink_sb, tc_index, pool_type,
					      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
					      info->snd_portid,
					      info->snd_seq, 0);
	if (err) {
		nlmsg_free(msg);
		return err;
	}

	return genlmsg_reply(msg, info);
}

static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
					int start, int *p_idx,
					struct devlink *devlink,
					struct devlink_sb *devlink_sb,
					u32 portid, u32 seq, int flags)
{
	struct devlink_port *devlink_port;
	unsigned long port_index;
	u16 tc_index;
	int err;

	xa_for_each(&devlink->ports, port_index, devlink_port) {
		for (tc_index = 0;
		     tc_index < devlink_sb->ingress_tc_count; tc_index++) {
			if (*p_idx < start) {
				(*p_idx)++;
				continue;
			}
			err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
							      devlink_port,
							      devlink_sb,
							      tc_index,
							      DEVLINK_SB_POOL_TYPE_INGRESS,
							      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
							      portid, seq,
							      flags);
			if (err)
				return err;
			(*p_idx)++;
		}
		for (tc_index = 0;
		     tc_index < devlink_sb->egress_tc_count; tc_index++) {
			if (*p_idx < start) {
				(*p_idx)++;
				continue;
			}
			err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
							      devlink_port,
							      devlink_sb,
							      tc_index,
							      DEVLINK_SB_POOL_TYPE_EGRESS,
							      DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
							      portid, seq,
							      flags);
			if (err)
				return err;
			(*p_idx)++;
		}
	}
	return 0;
}

static int
devlink_nl_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg,
					struct devlink *devlink,
					struct netlink_callback *cb, int flags)
{
	struct devlink_nl_dump_state *state = devlink_dump_state(cb);
	struct devlink_sb *devlink_sb;
	int idx = 0;
	int err = 0;

	if (!devlink->ops->sb_tc_pool_bind_get)
		return 0;

	list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
		err = __sb_tc_pool_bind_get_dumpit(msg, state->idx, &idx,
						   devlink, devlink_sb,
						   NETLINK_CB(cb->skb).portid,
						   cb->nlh->nlmsg_seq, flags);
		if (err == -EOPNOTSUPP) {
			err = 0;
		} else if (err) {
			state->idx = idx;
			break;
		}
	}

	return err;
}

int devlink_nl_sb_tc_pool_bind_get_dumpit(struct sk_buff *skb,
					  struct netlink_callback *cb)
{
	return devlink_nl_dumpit(skb, cb,
				 devlink_nl_sb_tc_pool_bind_get_dump_one);
}

static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				       unsigned int sb_index, u16 tc_index,
				       enum devlink_sb_pool_type pool_type,
				       u16 pool_index, u32 threshold,
				       struct netlink_ext_ack *extack)
{
	const struct devlink_ops *ops = devlink_port->devlink->ops;

	if (ops->sb_tc_pool_bind_set)
		return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
						tc_index, pool_type,
						pool_index, threshold, extack);
	return -EOPNOTSUPP;
}

int devlink_nl_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
					struct genl_info *info)
{
	struct devlink_port *devlink_port = info->user_ptr[1];
	struct devlink *devlink = info->user_ptr[0];
	enum devlink_sb_pool_type pool_type;
	struct devlink_sb *devlink_sb;
	u16 tc_index;
	u16 pool_index;
	u32 threshold;
	int err;

	devlink_sb = devlink_sb_get_from_info(devlink, info);
	if (IS_ERR(devlink_sb))
		return PTR_ERR(devlink_sb);

	err = devlink_sb_pool_type_get_from_info(info, &pool_type);
	if (err)
		return err;

	err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
						pool_type, &tc_index);
	if (err)
		return err;

	err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
						  &pool_index);
	if (err)
		return err;

	if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
		return -EINVAL;

	threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
	return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
					   tc_index, pool_type,
					   pool_index, threshold,
					   info->extack);
}

int devlink_nl_sb_occ_snapshot_doit(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct devlink *devlink = info->user_ptr[0];
	const struct devlink_ops *ops = devlink->ops;
	struct devlink_sb *devlink_sb;

	devlink_sb = devlink_sb_get_from_info(devlink, info);
	if (IS_ERR(devlink_sb))
		return PTR_ERR(devlink_sb);

	if (ops->sb_occ_snapshot)
		return ops->sb_occ_snapshot(devlink, devlink_sb->index);
	return -EOPNOTSUPP;
}

int devlink_nl_sb_occ_max_clear_doit(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct devlink *devlink = info->user_ptr[0];
	const struct devlink_ops *ops = devlink->ops;
	struct devlink_sb *devlink_sb;

	devlink_sb = devlink_sb_get_from_info(devlink, info);
	if (IS_ERR(devlink_sb))
		return PTR_ERR(devlink_sb);

	if (ops->sb_occ_max_clear)
		return ops->sb_occ_max_clear(devlink, devlink_sb->index);
	return -EOPNOTSUPP;
}

int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
		     u32 size, u16 ingress_pools_count,
		     u16 egress_pools_count, u16 ingress_tc_count,
		     u16 egress_tc_count)
{
	struct devlink_sb *devlink_sb;

	lockdep_assert_held(&devlink->lock);

	if (devlink_sb_index_exists(devlink, sb_index))
		return -EEXIST;

	devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
	if (!devlink_sb)
		return -ENOMEM;
	devlink_sb->index = sb_index;
	devlink_sb->size = size;
	devlink_sb->ingress_pools_count = ingress_pools_count;
	devlink_sb->egress_pools_count = egress_pools_count;
	devlink_sb->ingress_tc_count = ingress_tc_count;
	devlink_sb->egress_tc_count = egress_tc_count;
	list_add_tail(&devlink_sb->list, &devlink->sb_list);
	return 0;
}
EXPORT_SYMBOL_GPL(devl_sb_register);

int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
			u32 size, u16 ingress_pools_count,
			u16 egress_pools_count, u16 ingress_tc_count,
			u16 egress_tc_count)
{
	int err;

	devl_lock(devlink);
	err = devl_sb_register(devlink, sb_index, size, ingress_pools_count,
			       egress_pools_count, ingress_tc_count,
			       egress_tc_count);
	devl_unlock(devlink);
	return err;
}
EXPORT_SYMBOL_GPL(devlink_sb_register);

void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index)
{
	struct devlink_sb *devlink_sb;

	lockdep_assert_held(&devlink->lock);

	devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
	WARN_ON(!devlink_sb);
	list_del(&devlink_sb->list);
	kfree(devlink_sb);
}
EXPORT_SYMBOL_GPL(devl_sb_unregister);

void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
{
	devl_lock(devlink);
	devl_sb_unregister(devlink, sb_index);
	devl_unlock(devlink);
}
EXPORT_SYMBOL_GPL(devlink_sb_unregister);
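/*
 * Editor's example, not part of the original file: devl_sb_register() /
 * devlink_sb_register() above are the driver-facing entry points for
 * describing a shared buffer. A minimal, hypothetical probe-time sketch;
 * the function name, the 16 MB size, and the pool/TC counts are assumed
 * values for illustration, not taken from this file.
 */
static int example_sb_init(struct devlink *dl)
{
	/* One shared buffer (index 0) of 16 MB, with 4 ingress and
	 * 4 egress pools and 8 traffic classes per direction.
	 * Returns -EEXIST if sb_index 0 is already registered.
	 */
	return devlink_sb_register(dl, 0, 16 * 1024 * 1024, 4, 4, 8, 8);
}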
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/io_uring.h>
#include <linux/fsnotify.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sync.h"

struct io_sync {
	struct file			*file;
	loff_t				len;
	loff_t				off;
	int				flags;
	int				mode;
};

int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	sync->flags = READ_ONCE(sqe->sync_range_flags);
	req->flags |= REQ_F_FORCE_ASYNC;

	return 0;
}

int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* sync_file_range always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	sync->flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(sync->flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	loff_t end = sync->off + sync->len;
	int ret;

	/* fsync always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
			      sync->flags & IORING_FSYNC_DATASYNC);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);

	if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	sync->off = READ_ONCE(sqe->off);
	sync->len = READ_ONCE(sqe->addr);
	sync->mode = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
	int ret;

	/* fallocate always requires a blocking context */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
	if (ret >= 0)
		fsnotify_modify(req->file);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}
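/*
 * Editor's example, not part of the original file: a minimal userspace
 * sketch that exercises io_fsync_prep()/io_fsync() above through liburing.
 * Assumes the liburing headers are installed; the file name is arbitrary.
 */
#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd = open("data.bin", O_WRONLY | O_CREAT, 0644);

	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* IORING_FSYNC_DATASYNC feeds the sync->flags check in
	 * io_fsync_prep(); off/len of 0 means "whole file" in io_fsync(),
	 * since end == 0 falls back to LLONG_MAX there.
	 */
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("fsync res = %d\n", cqe->res); /* 0 on success */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}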
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Realtek Semiconductor Corp. All rights reserved.
 */

#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <uapi/linux/mdio.h>
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
#include <linux/suspend.h>
#include <linux/atomic.h>
#include <linux/acpi.h>
#include <linux/firmware.h>
#include <crypto/sha2.h>
#include <linux/usb/r8152.h>
#include <net/gso.h>

/* Information for net-next */
#define NETNEXT_VERSION		"12"

/* Information for net */
#define NET_VERSION		"13"

#define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"

#define R8152_PHY_ID		32

#define PLA_IDR			0xc000
#define PLA_RCR			0xc010
#define PLA_RCR1		0xc012
#define PLA_RMS			0xc016
#define PLA_RXFIFO_CTRL0	0xc0a0
#define PLA_RXFIFO_FULL		0xc0a2
#define PLA_RXFIFO_CTRL1	0xc0a4
#define PLA_RX_FIFO_FULL	0xc0a6
#define PLA_RXFIFO_CTRL2	0xc0a8
#define PLA_RX_FIFO_EMPTY	0xc0aa
#define PLA_DMY_REG0		0xc0b0
#define PLA_FMC			0xc0b4
#define PLA_CFG_WOL		0xc0b6
#define PLA_TEREDO_CFG		0xc0bc
#define PLA_TEREDO_WAKE_BASE	0xc0c4
#define PLA_MAR			0xcd00
#define PLA_BACKUP		0xd000
#define PLA_BDC_CR		0xd1a0
#define PLA_TEREDO_TIMER	0xd2cc
#define PLA_REALWOW_TIMER	0xd2e8
#define PLA_UPHY_TIMER		0xd388
#define PLA_SUSPEND_FLAG	0xd38a
#define PLA_INDICATE_FALG	0xd38c
#define PLA_MACDBG_PRE		0xd38c	/* RTL_VER_04 only */
#define PLA_MACDBG_POST		0xd38e	/* RTL_VER_04 only */
#define PLA_EXTRA_STATUS	0xd398
#define PLA_GPHY_CTRL		0xd3ae
#define PLA_POL_GPIO_CTRL	0xdc6a
#define PLA_EFUSE_DATA		0xdd00
#define PLA_EFUSE_CMD		0xdd02
#define PLA_LEDSEL		0xdd90
#define PLA_LED_FEATURE		0xdd92
#define PLA_PHYAR		0xde00
#define PLA_BOOT_CTRL		0xe004
#define PLA_LWAKE_CTRL_REG	0xe007
#define PLA_GPHY_INTR_IMR	0xe022
#define PLA_EEE_CR		0xe040
#define PLA_EEE_TXTWSYS		0xe04c
#define PLA_EEE_TXTWSYS_2P5G	0xe058
#define PLA_EEEP_CR		0xe080
#define PLA_MAC_PWR_CTRL	0xe0c0
#define PLA_MAC_PWR_CTRL2	0xe0ca
#define PLA_MAC_PWR_CTRL3	0xe0cc
#define PLA_MAC_PWR_CTRL4	0xe0ce
#define PLA_WDT6_CTRL		0xe428
#define PLA_TCR0		0xe610
#define PLA_TCR1		0xe612
#define PLA_MTPS		0xe615
#define PLA_TXFIFO_CTRL		0xe618
#define PLA_TXFIFO_FULL		0xe61a
#define PLA_RSTTALLY		0xe800
#define PLA_CR			0xe813
#define PLA_CRWECR		0xe81c
#define PLA_CONFIG12		0xe81e	/* CONFIG1, CONFIG2 */
#define PLA_CONFIG34		0xe820	/* CONFIG3, CONFIG4 */
#define PLA_CONFIG5		0xe822
#define PLA_PHY_PWR		0xe84c
#define PLA_OOB_CTRL		0xe84f
#define PLA_CPCR		0xe854
#define PLA_MISC_0		0xe858
#define PLA_MISC_1		0xe85a
#define PLA_OCP_GPHY_BASE	0xe86c
#define PLA_TALLYCNT		0xe890
#define PLA_SFF_STS_7		0xe8de
#define PLA_PHYSTATUS		0xe908
#define PLA_CONFIG6		0xe90a	/* CONFIG6 */
#define PLA_USB_CFG		0xe952
#define PLA_BP_BA		0xfc26
#define PLA_BP_0		0xfc28
#define PLA_BP_1		0xfc2a
#define PLA_BP_2		0xfc2c
#define PLA_BP_3		0xfc2e
#define PLA_BP_4		0xfc30
#define PLA_BP_5		0xfc32
#define PLA_BP_6		0xfc34
#define PLA_BP_7		0xfc36
#define PLA_BP_EN		0xfc38

#define USB_USB2PHY		0xb41e
#define USB_SSPHYLINK1		0xb426
#define USB_SSPHYLINK2		0xb428
#define USB_L1_CTRL		0xb45e
#define USB_U2P3_CTRL		0xb460
#define USB_CSR_DUMMY1		0xb464
#define USB_CSR_DUMMY2		0xb466
#define USB_DEV_STAT		0xb808
#define USB_CONNECT_TIMER	0xcbf8
#define USB_MSC_TIMER		0xcbfc
#define USB_BURST_SIZE		0xcfc0
#define USB_FW_FIX_EN0		0xcfca
#define USB_FW_FIX_EN1		0xcfcc
#define USB_LPM_CONFIG		0xcfd8
#define USB_ECM_OPTION		0xcfee
#define USB_CSTMR		0xcfef	/* RTL8153A */
#define USB_MISC_2		0xcfff
#define USB_ECM_OP		0xd26b
#define USB_GPHY_CTRL		0xd284
#define USB_SPEED_OPTION	0xd32a
#define USB_FW_CTRL		0xd334	/* RTL8153B */
#define USB_FC_TIMER		0xd340
#define USB_USB_CTRL		0xd406
#define USB_PHY_CTRL		0xd408
#define USB_TX_AGG		0xd40a
#define USB_RX_BUF_TH		0xd40c
#define USB_USB_TIMER		0xd428
#define USB_RX_EARLY_TIMEOUT	0xd42c
#define USB_RX_EARLY_SIZE	0xd42e
#define USB_PM_CTRL_STATUS	0xd432	/* RTL8153A */
#define USB_RX_EXTRA_AGGR_TMR	0xd432	/* RTL8153B */
#define USB_TX_DMA		0xd434
#define USB_UPT_RXDMA_OWN	0xd437
#define USB_UPHY3_MDCMDIO	0xd480
#define USB_TOLERANCE		0xd490
#define USB_LPM_CTRL		0xd41a
#define USB_BMU_RESET		0xd4b0
#define USB_BMU_CONFIG		0xd4b4
#define USB_U1U2_TIMER		0xd4da
#define USB_FW_TASK		0xd4e8	/* RTL8153B */
#define USB_RX_AGGR_NUM		0xd4ee
#define USB_UPS_CTRL		0xd800
#define USB_POWER_CUT		0xd80a
#define USB_MISC_0		0xd81a
#define USB_MISC_1		0xd81f
#define USB_AFE_CTRL2		0xd824
#define USB_UPHY_XTAL		0xd826
#define USB_UPS_CFG		0xd842
#define USB_UPS_FLAGS		0xd848
#define USB_WDT1_CTRL		0xe404
#define USB_WDT11_CTRL		0xe43c

#define USB_BP_BA		PLA_BP_BA
#define USB_BP_0		PLA_BP_0
#define USB_BP_1		PLA_BP_1
#define USB_BP_2		PLA_BP_2
#define USB_BP_3		PLA_BP_3
#define USB_BP_4		PLA_BP_4
#define USB_BP_5		PLA_BP_5
#define USB_BP_6		PLA_BP_6
#define USB_BP_7		PLA_BP_7
#define USB_BP_EN		PLA_BP_EN	/* RTL8153A */
#define USB_BP_8		0xfc38		/* RTL8153B */
#define USB_BP_9		0xfc3a
#define USB_BP_10		0xfc3c
#define USB_BP_11		0xfc3e
#define USB_BP_12		0xfc40
#define USB_BP_13		0xfc42
#define USB_BP_14		0xfc44
#define USB_BP_15		0xfc46
#define USB_BP2_EN		0xfc48

/* OCP Registers */
#define OCP_ALDPS_CONFIG	0x2010
#define OCP_EEE_CONFIG1		0x2080
#define OCP_EEE_CONFIG2		0x2092
#define OCP_EEE_CONFIG3		0x2094
#define OCP_BASE_MII		0xa400
#define OCP_EEE_AR		0xa41a
#define OCP_EEE_DATA		0xa41c
#define OCP_PHY_STATUS		0xa420
#define OCP_INTR_EN		0xa424
#define OCP_NCTL_CFG		0xa42c
#define OCP_POWER_CFG		0xa430
#define OCP_EEE_CFG		0xa432
#define OCP_SRAM_ADDR		0xa436
#define OCP_SRAM_DATA		0xa438
#define OCP_DOWN_SPEED		0xa442
#define OCP_EEE_ABLE		0xa5c4
#define OCP_EEE_ADV		0xa5d0
#define OCP_EEE_LPABLE		0xa5d2
#define OCP_10GBT_CTRL		0xa5d4
#define OCP_10GBT_STAT		0xa5d6
#define OCP_EEE_ADV2		0xa6d4
#define OCP_PHY_STATE		0xa708	/* nway state for 8153 */
#define OCP_PHY_PATCH_STAT	0xb800
#define OCP_PHY_PATCH_CMD	0xb820
#define OCP_PHY_LOCK		0xb82e
#define OCP_ADC_IOFFSET		0xbcfc
#define OCP_ADC_CFG		0xbc06
#define OCP_SYSCLK_CFG		0xc416

/* SRAM Register */
#define SRAM_GREEN_CFG		0x8011
#define SRAM_LPF_CFG		0x8012
#define SRAM_GPHY_FW_VER	0x801e
#define SRAM_10M_AMP1		0x8080
#define SRAM_10M_AMP2		0x8082
#define SRAM_IMPEDANCE		0x8084
#define SRAM_PHY_LOCK		0xb82e

/* PLA_RCR */
#define RCR_AAP			0x00000001
#define RCR_APM			0x00000002
#define RCR_AM			0x00000004
#define RCR_AB			0x00000008
#define RCR_ACPT_ALL		(RCR_AAP | RCR_APM | RCR_AM | RCR_AB)
#define SLOT_EN			BIT(11)

/* PLA_RCR1 */
#define OUTER_VLAN		BIT(7)
#define INNER_VLAN		BIT(6)

/* PLA_RXFIFO_CTRL0 */
#define RXFIFO_THR1_NORMAL	0x00080002
#define RXFIFO_THR1_OOB		0x01800003

/* PLA_RXFIFO_FULL */
#define RXFIFO_FULL_MASK	0xfff

/* PLA_RXFIFO_CTRL1 */
#define RXFIFO_THR2_FULL	0x00000060
#define RXFIFO_THR2_HIGH	0x00000038
#define RXFIFO_THR2_OOB		0x0000004a
#define RXFIFO_THR2_NORMAL	0x00a0

/* PLA_RXFIFO_CTRL2 */
#define RXFIFO_THR3_FULL	0x00000078
#define RXFIFO_THR3_HIGH	0x00000048
#define RXFIFO_THR3_OOB		0x0000005a
#define RXFIFO_THR3_NORMAL	0x0110

/* PLA_TXFIFO_CTRL */
#define TXFIFO_THR_NORMAL	0x00400008
#define TXFIFO_THR_NORMAL2	0x01000008

/* PLA_DMY_REG0 */
#define ECM_ALDPS		0x0002

/* PLA_FMC */
#define FMC_FCR_MCU_EN		0x0001

/* PLA_EEEP_CR */
#define EEEP_CR_EEEP_TX		0x0002

/* PLA_WDT6_CTRL */
#define WDT6_SET_MODE		0x0010

/* PLA_TCR0 */
#define TCR0_TX_EMPTY		0x0800
#define TCR0_AUTO_FIFO		0x0080

/* PLA_TCR1 */
#define VERSION_MASK		0x7cf0
#define IFG_MASK		(BIT(3) | BIT(9) | BIT(8))
#define IFG_144NS		BIT(9)
#define IFG_96NS		(BIT(9) | BIT(8))

/* PLA_MTPS */
#define MTPS_JUMBO		(12 * 1024 / 64)
#define MTPS_DEFAULT		(6 * 1024 / 64)

/* PLA_RSTTALLY */
#define TALLY_RESET		0x0001

/* PLA_CR */
#define CR_RST			0x10
#define CR_RE			0x08
#define CR_TE			0x04

/* PLA_CRWECR */
#define CRWECR_NORAML		0x00
#define CRWECR_CONFIG		0xc0

/* PLA_OOB_CTRL */
#define NOW_IS_OOB		0x80
#define TXFIFO_EMPTY		0x20
#define RXFIFO_EMPTY		0x10
#define LINK_LIST_READY		0x02
#define DIS_MCU_CLROOB		0x01
#define FIFO_EMPTY		(TXFIFO_EMPTY | RXFIFO_EMPTY)

/* PLA_MISC_1 */
#define RXDY_GATED_EN		0x0008

/* PLA_SFF_STS_7 */
#define RE_INIT_LL		0x8000
#define MCU_BORW_EN		0x4000

/* PLA_CPCR */
#define FLOW_CTRL_EN		BIT(0)
#define CPCR_RX_VLAN		0x0040

/* PLA_CFG_WOL */
#define MAGIC_EN		0x0001

/* PLA_TEREDO_CFG */
#define TEREDO_SEL		0x8000
#define TEREDO_WAKE_MASK	0x7f00
#define TEREDO_RS_EVENT_MASK	0x00fe
#define OOB_TEREDO_EN		0x0001

/* PLA_BDC_CR */
#define ALDPS_PROXY_MODE	0x0001

/* PLA_EFUSE_CMD */
#define EFUSE_READ_CMD		BIT(15)
#define EFUSE_DATA_BIT16	BIT(7)

/* PLA_CONFIG34 */
#define LINK_ON_WAKE_EN		0x0010
#define LINK_OFF_WAKE_EN	0x0008

/* PLA_CONFIG6 */
#define LANWAKE_CLR_EN		BIT(0)

/* PLA_USB_CFG */
#define EN_XG_LIP		BIT(1)
#define EN_G_LIP		BIT(2)

/* PLA_CONFIG5 */
#define BWF_EN			0x0040
#define MWF_EN			0x0020
#define UWF_EN			0x0010
#define LAN_WAKE_EN		0x0002

/* PLA_LED_FEATURE */
#define LED_MODE_MASK		0x0700

/* PLA_PHY_PWR */
#define TX_10M_IDLE_EN		0x0080
#define PFM_PWM_SWITCH		0x0040
#define TEST_IO_OFF		BIT(4)

/* PLA_MAC_PWR_CTRL */
#define D3_CLK_GATED_EN		0x00004000
#define MCU_CLK_RATIO		0x07010f07
#define MCU_CLK_RATIO_MASK	0x0f0f0f0f
#define ALDPS_SPDWN_RATIO	0x0f87

/* PLA_MAC_PWR_CTRL2 */
#define EEE_SPDWN_RATIO		0x8007
#define MAC_CLK_SPDWN_EN	BIT(15)
#define EEE_SPDWN_RATIO_MASK	0xff

/* PLA_MAC_PWR_CTRL3 */
#define PLA_MCU_SPDWN_EN	BIT(14)
#define PKT_AVAIL_SPDWN_EN	0x0100
#define SUSPEND_SPDWN_EN	0x0004
#define U1U2_SPDWN_EN		0x0002
#define L1_SPDWN_EN		0x0001

/* PLA_MAC_PWR_CTRL4 */
#define PWRSAVE_SPDWN_EN	0x1000
#define RXDV_SPDWN_EN		0x0800
#define TX10MIDLE_EN		0x0100
#define IDLE_SPDWN_EN		BIT(6)
#define TP100_SPDWN_EN		0x0020
#define TP500_SPDWN_EN		0x0010
#define TP1000_SPDWN_EN		0x0008
#define EEE_SPDWN_EN		0x0001

/* PLA_GPHY_INTR_IMR */
#define GPHY_STS_MSK		0x0001
#define SPEED_DOWN_MSK		0x0002
#define SPDWN_RXDV_MSK		0x0004
#define SPDWN_LINKCHG_MSK	0x0008

/* PLA_PHYAR */
#define PHYAR_FLAG		0x80000000

/* PLA_EEE_CR */
#define EEE_RX_EN		0x0001
#define EEE_TX_EN		0x0002

/* PLA_BOOT_CTRL */
#define AUTOLOAD_DONE		0x0002

/* PLA_LWAKE_CTRL_REG */
#define LANWAKE_PIN		BIT(7)

/* PLA_SUSPEND_FLAG */
#define LINK_CHG_EVENT		BIT(0)

/* PLA_INDICATE_FALG */
#define UPCOMING_RUNTIME_D3	BIT(0)

/* PLA_MACDBG_PRE and PLA_MACDBG_POST */
#define
DEBUG_OE BIT(0) #define DEBUG_LTSSM 0x0082 /* PLA_EXTRA_STATUS */ #define CUR_LINK_OK BIT(15) #define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */ #define LINK_CHANGE_FLAG BIT(8) #define POLL_LINK_CHG BIT(0) /* PLA_GPHY_CTRL */ #define GPHY_FLASH BIT(1) /* PLA_POL_GPIO_CTRL */ #define DACK_DET_EN BIT(15) #define POL_GPHY_PATCH BIT(4) /* USB_USB2PHY */ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 /* USB_SSPHYLINK1 */ #define DELAY_PHY_PWR_CHG BIT(1) /* USB_SSPHYLINK2 */ #define pwd_dn_scale_mask 0x3ffe #define pwd_dn_scale(x) ((x) << 1) /* USB_CSR_DUMMY1 */ #define DYNAMIC_BURST 0x0001 /* USB_CSR_DUMMY2 */ #define EP4_FULL_FC 0x0001 /* USB_DEV_STAT */ #define STAT_SPEED_MASK 0x0006 #define STAT_SPEED_HIGH 0x0000 #define STAT_SPEED_FULL 0x0002 /* USB_FW_FIX_EN0 */ #define FW_FIX_SUSPEND BIT(14) /* USB_FW_FIX_EN1 */ #define FW_IP_RESET_EN BIT(9) /* USB_LPM_CONFIG */ #define LPM_U1U2_EN BIT(0) /* USB_TX_AGG */ #define TX_AGG_MAX_THRESHOLD 0x03 /* USB_RX_BUF_TH */ #define RX_THR_SUPPER 0x0c350180 #define RX_THR_HIGH 0x7a120180 #define RX_THR_SLOW 0xffff0180 #define RX_THR_B 0x00010001 /* USB_TX_DMA */ #define TEST_MODE_DISABLE 0x00000001 #define TX_SIZE_ADJUST1 0x00000100 /* USB_BMU_RESET */ #define BMU_RESET_EP_IN 0x01 #define BMU_RESET_EP_OUT 0x02 /* USB_BMU_CONFIG */ #define ACT_ODMA BIT(1) /* USB_UPT_RXDMA_OWN */ #define OWN_UPDATE BIT(0) #define OWN_CLEAR BIT(1) /* USB_FW_TASK */ #define FC_PATCH_TASK BIT(1) /* USB_RX_AGGR_NUM */ #define RX_AGGR_NUM_MASK 0x1ff /* USB_UPS_CTRL */ #define POWER_CUT 0x0100 /* USB_PM_CTRL_STATUS */ #define RESUME_INDICATE 0x0001 /* USB_ECM_OPTION */ #define BYPASS_MAC_RESET BIT(5) /* USB_CSTMR */ #define FORCE_SUPER BIT(0) /* USB_MISC_2 */ #define UPS_FORCE_PWR_DOWN BIT(0) /* USB_ECM_OP */ #define EN_ALL_SPEED BIT(0) /* USB_GPHY_CTRL */ #define GPHY_PATCH_DONE BIT(2) #define BYPASS_FLASH BIT(5) #define BACKUP_RESTRORE BIT(6) /* USB_SPEED_OPTION */ #define RG_PWRDN_EN BIT(8) #define ALL_SPEED_OFF BIT(9) /* USB_FW_CTRL */ #define FLOW_CTRL_PATCH_OPT BIT(1) #define AUTO_SPEEDUP BIT(3) #define FLOW_CTRL_PATCH_2 BIT(8) /* USB_FC_TIMER */ #define CTRL_TIMER_EN BIT(15) /* USB_USB_CTRL */ #define CDC_ECM_EN BIT(3) #define RX_AGG_DISABLE 0x0010 #define RX_ZERO_EN 0x0080 /* USB_U2P3_CTRL */ #define U2P3_ENABLE 0x0001 #define RX_DETECT8 BIT(3) /* USB_POWER_CUT */ #define PWR_EN 0x0001 #define PHASE2_EN 0x0008 #define UPS_EN BIT(4) #define USP_PREWAKE BIT(5) /* USB_MISC_0 */ #define PCUT_STATUS 0x0001 /* USB_RX_EARLY_TIMEOUT */ #define COALESCE_SUPER 85000U #define COALESCE_HIGH 250000U #define COALESCE_SLOW 524280U /* USB_WDT1_CTRL */ #define WTD1_EN BIT(0) /* USB_WDT11_CTRL */ #define TIMER11_EN 0x0001 /* USB_LPM_CTRL */ /* bit 4 ~ 5: fifo empty boundary */ #define FIFO_EMPTY_1FB 0x30 /* 0x1fb * 64 = 32448 bytes */ /* bit 2 ~ 3: LMP timer */ #define LPM_TIMER_MASK 0x0c #define LPM_TIMER_500MS 0x04 /* 500 ms */ #define LPM_TIMER_500US 0x0c /* 500 us */ #define ROK_EXIT_LPM 0x02 /* USB_AFE_CTRL2 */ #define SEN_VAL_MASK 0xf800 #define SEN_VAL_NORMAL 0xa000 #define SEL_RXIDLE 0x0100 /* USB_UPHY_XTAL */ #define OOBS_POLLING BIT(8) /* USB_UPS_CFG */ #define SAW_CNT_1MS_MASK 0x0fff #define MID_REVERSE BIT(5) /* RTL8156A */ /* USB_UPS_FLAGS */ #define UPS_FLAGS_R_TUNE BIT(0) #define UPS_FLAGS_EN_10M_CKDIV BIT(1) #define UPS_FLAGS_250M_CKDIV BIT(2) #define UPS_FLAGS_EN_ALDPS BIT(3) #define UPS_FLAGS_CTAP_SHORT_DIS BIT(4) #define UPS_FLAGS_SPEED_MASK (0xf << 16) #define ups_flags_speed(x) ((x) << 16) #define UPS_FLAGS_EN_EEE BIT(20) #define UPS_FLAGS_EN_500M_EEE BIT(21) 
#define UPS_FLAGS_EN_EEE_CKDIV BIT(22) #define UPS_FLAGS_EEE_PLLOFF_100 BIT(23) #define UPS_FLAGS_EEE_PLLOFF_GIGA BIT(24) #define UPS_FLAGS_EEE_CMOD_LV_EN BIT(25) #define UPS_FLAGS_EN_GREEN BIT(26) #define UPS_FLAGS_EN_FLOW_CTR BIT(27) enum spd_duplex { NWAY_10M_HALF, NWAY_10M_FULL, NWAY_100M_HALF, NWAY_100M_FULL, NWAY_1000M_FULL, FORCE_10M_HALF, FORCE_10M_FULL, FORCE_100M_HALF, FORCE_100M_FULL, FORCE_1000M_FULL, NWAY_2500M_FULL, }; /* OCP_ALDPS_CONFIG */ #define ENPWRSAVE 0x8000 #define ENPDNPS 0x0200 #define LINKENA 0x0100 #define DIS_SDSAVE 0x0010 /* OCP_PHY_STATUS */ #define PHY_STAT_MASK 0x0007 #define PHY_STAT_EXT_INIT 2 #define PHY_STAT_LAN_ON 3 #define PHY_STAT_PWRDN 5 /* OCP_INTR_EN */ #define INTR_SPEED_FORCE BIT(3) /* OCP_NCTL_CFG */ #define PGA_RETURN_EN BIT(1) /* OCP_POWER_CFG */ #define EEE_CLKDIV_EN 0x8000 #define EN_ALDPS 0x0004 #define EN_10M_PLLOFF 0x0001 /* OCP_EEE_CONFIG1 */ #define RG_TXLPI_MSK_HFDUP 0x8000 #define RG_MATCLR_EN 0x4000 #define EEE_10_CAP 0x2000 #define EEE_NWAY_EN 0x1000 #define TX_QUIET_EN 0x0200 #define RX_QUIET_EN 0x0100 #define sd_rise_time_mask 0x0070 #define sd_rise_time(x) (min(x, 7) << 4) /* bit 4 ~ 6 */ #define RG_RXLPI_MSK_HFDUP 0x0008 #define SDFALLTIME 0x0007 /* bit 0 ~ 2 */ /* OCP_EEE_CONFIG2 */ #define RG_LPIHYS_NUM 0x7000 /* bit 12 ~ 15 */ #define RG_DACQUIET_EN 0x0400 #define RG_LDVQUIET_EN 0x0200 #define RG_CKRSEL 0x0020 #define RG_EEEPRG_EN 0x0010 /* OCP_EEE_CONFIG3 */ #define fast_snr_mask 0xff80 #define fast_snr(x) (min(x, 0x1ff) << 7) /* bit 7 ~ 15 */ #define RG_LFS_SEL 0x0060 /* bit 6 ~ 5 */ #define MSK_PH 0x0006 /* bit 0 ~ 3 */ /* OCP_EEE_AR */ /* bit[15:14] function */ #define FUN_ADDR 0x0000 #define FUN_DATA 0x4000 /* bit[4:0] device addr */ /* OCP_EEE_CFG */ #define CTAP_SHORT_EN 0x0040 #define EEE10_EN 0x0010 /* OCP_DOWN_SPEED */ #define EN_EEE_CMODE BIT(14) #define EN_EEE_1000 BIT(13) #define EN_EEE_100 BIT(12) #define EN_10M_CLKDIV BIT(11) #define EN_10M_BGOFF 0x0080 /* OCP_10GBT_CTRL */ #define RTL_ADV2_5G_F_R BIT(5) /* Advertise 2.5GBASE-T fast-retrain */ /* OCP_PHY_STATE */ #define TXDIS_STATE 0x01 #define ABD_STATE 0x02 /* OCP_PHY_PATCH_STAT */ #define PATCH_READY BIT(6) /* OCP_PHY_PATCH_CMD */ #define PATCH_REQUEST BIT(4) /* OCP_PHY_LOCK */ #define PATCH_LOCK BIT(0) /* OCP_ADC_CFG */ #define CKADSEL_L 0x0100 #define ADC_EN 0x0080 #define EN_EMI_L 0x0040 /* OCP_SYSCLK_CFG */ #define sysclk_div_expo(x) (min(x, 5) << 8) #define clk_div_expo(x) (min(x, 5) << 4) /* SRAM_GREEN_CFG */ #define GREEN_ETH_EN BIT(15) #define R_TUNE_EN BIT(11) /* SRAM_LPF_CFG */ #define LPF_AUTO_TUNE 0x8000 /* SRAM_10M_AMP1 */ #define GDAC_IB_UPALL 0x0008 /* SRAM_10M_AMP2 */ #define AMP_DN 0x0200 /* SRAM_IMPEDANCE */ #define RX_DRIVING_MASK 0x6000 /* SRAM_PHY_LOCK */ #define PHY_PATCH_LOCK 0x0001 /* MAC PASSTHRU */ #define AD_MASK 0xfee0 #define BND_MASK 0x0004 #define BD_MASK 0x0001 #define EFUSE 0xcfdb #define PASS_THRU_MASK 0x1 #define BP4_SUPER_ONLY 0x1578 /* RTL_VER_04 only */ enum rtl_register_content { _2500bps = BIT(10), _1250bps = BIT(9), _500bps = BIT(8), _tx_flow = BIT(6), _rx_flow = BIT(5), _1000bps = 0x10, _100bps = 0x08, _10bps = 0x04, LINK_STATUS = 0x02, FULL_DUP = 0x01, }; #define is_speed_2500(_speed) (((_speed) & (_2500bps | LINK_STATUS)) == (_2500bps | LINK_STATUS)) #define is_flow_control(_speed) (((_speed) & (_tx_flow | _rx_flow)) == (_tx_flow | _rx_flow)) #define RTL8152_MAX_TX 4 #define RTL8152_MAX_RX 10 #define INTBUFSIZE 2 #define TX_ALIGN 4 #define RX_ALIGN 8 #define RTL8152_RX_MAX_PENDING 4096 #define RTL8152_RXFG_HEADSZ 
256 #define INTR_LINK 0x0004 #define RTL8152_RMS (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) #define RTL8153_RMS RTL8153_MAX_PACKET #define RTL8152_TX_TIMEOUT (5 * HZ) #define mtu_to_size(m) ((m) + VLAN_ETH_HLEN + ETH_FCS_LEN) #define size_to_mtu(s) ((s) - VLAN_ETH_HLEN - ETH_FCS_LEN) #define rx_reserved_size(x) (mtu_to_size(x) + sizeof(struct rx_desc) + RX_ALIGN) /* rtl8152 flags */ enum rtl8152_flags { RTL8152_INACCESSIBLE = 0, RTL8152_SET_RX_MODE, WORK_ENABLE, RTL8152_LINK_CHG, SELECTIVE_SUSPEND, PHY_RESET, SCHEDULE_TASKLET, GREEN_ETHERNET, RX_EPROTO, IN_PRE_RESET, PROBED_WITH_NO_ERRORS, PROBE_SHOULD_RETRY, }; #define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e #define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054 #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 #define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062 #define DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK 0xa359 struct tally_counter { __le64 tx_packets; __le64 rx_packets; __le64 tx_errors; __le32 rx_errors; __le16 rx_missed; __le16 align_errors; __le32 tx_one_collision; __le32 tx_multi_collision; __le64 rx_unicast; __le64 rx_broadcast; __le32 rx_multicast; __le16 tx_aborted; __le16 tx_underrun; }; struct rx_desc { __le32 opts1; #define RX_LEN_MASK 0x7fff __le32 opts2; #define RD_UDP_CS BIT(23) #define RD_TCP_CS BIT(22) #define RD_IPV6_CS BIT(20) #define RD_IPV4_CS BIT(19) __le32 opts3; #define IPF BIT(23) /* IP checksum fail */ #define UDPF BIT(22) /* UDP checksum fail */ #define TCPF BIT(21) /* TCP checksum fail */ #define RX_VLAN_TAG BIT(16) __le32 opts4; __le32 opts5; __le32 opts6; }; struct tx_desc { __le32 opts1; #define TX_FS BIT(31) /* First segment of a packet */ #define TX_LS BIT(30) /* Final segment of a packet */ #define GTSENDV4 BIT(28) #define GTSENDV6 BIT(27) #define GTTCPHO_SHIFT 18 #define GTTCPHO_MAX 0x7fU #define TX_LEN_MAX 0x3ffffU __le32 opts2; #define UDP_CS BIT(31) /* Calculate UDP/IP checksum */ #define TCP_CS BIT(30) /* Calculate TCP/IP checksum */ #define IPV4_CS BIT(29) /* Calculate IPv4 checksum */ #define IPV6_CS BIT(28) /* Calculate IPv6 checksum */ #define MSS_SHIFT 17 #define MSS_MAX 0x7ffU #define TCPHO_SHIFT 17 #define TCPHO_MAX 0x7ffU #define TX_VLAN_TAG BIT(16) }; struct r8152; struct rx_agg { struct list_head list, info_list; struct urb *urb; struct r8152 *context; struct page *page; void *buffer; }; struct tx_agg { struct list_head list; struct urb *urb; struct r8152 *context; void *buffer; void *head; u32 skb_num; u32 skb_len; }; struct r8152 { unsigned long flags; struct usb_device *udev; struct napi_struct napi; struct usb_interface *intf; struct net_device *netdev; struct urb *intr_urb; struct tx_agg tx_info[RTL8152_MAX_TX]; struct list_head rx_info, rx_used; struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notifier; #endif struct tasklet_struct tx_tl; struct rtl_ops { void (*init)(struct r8152 *tp); int (*enable)(struct r8152 *tp); void (*disable)(struct r8152 *tp); void (*up)(struct r8152 *tp); void (*down)(struct r8152 *tp); void (*unload)(struct r8152 *tp); int (*eee_get)(struct r8152 *tp, struct ethtool_keee *eee); int (*eee_set)(struct r8152 *tp, struct ethtool_keee *eee); bool (*in_nway)(struct r8152 *tp); void (*hw_phy_cfg)(struct r8152 *tp); void (*autosuspend_en)(struct r8152 *tp, 
bool enable); void (*change_mtu)(struct r8152 *tp); } rtl_ops; struct ups_info { u32 r_tune:1; u32 _10m_ckdiv:1; u32 _250m_ckdiv:1; u32 aldps:1; u32 lite_mode:2; u32 speed_duplex:4; u32 eee:1; u32 eee_lite:1; u32 eee_ckdiv:1; u32 eee_plloff_100:1; u32 eee_plloff_giga:1; u32 eee_cmod_lv:1; u32 green:1; u32 flow_control:1; u32 ctap_short_off:1; } ups_info; #define RTL_VER_SIZE 32 struct rtl_fw { const char *fw_name; const struct firmware *fw; char version[RTL_VER_SIZE]; int (*pre_fw)(struct r8152 *tp); int (*post_fw)(struct r8152 *tp); bool retry; } rtl_fw; atomic_t rx_count; bool eee_en; int intr_interval; u32 saved_wolopts; u32 msg_enable; u32 tx_qlen; u32 coalesce; u32 advertising; u32 rx_buf_sz; u32 rx_copybreak; u32 rx_pending; u32 fc_pause_on, fc_pause_off; unsigned int pipe_in, pipe_out, pipe_intr, pipe_ctrl_in, pipe_ctrl_out; u32 support_2500full:1; u32 lenovo_macpassthru:1; u32 dell_tb_rx_agg_bug:1; u16 ocp_base; u16 speed; u16 eee_adv; u8 *intr_buff; u8 version; u8 duplex; u8 autoneg; unsigned int reg_access_reset_count; }; /** * struct fw_block - block type and total length * @type: type of the current block, such as RTL_FW_END, RTL_FW_PLA, * RTL_FW_USB and so on. * @length: total length of the current block. */ struct fw_block { __le32 type; __le32 length; } __packed; /** * struct fw_header - header of the firmware file * @checksum: checksum of sha256 which is calculated from the whole file * except the checksum field of the file. That is, calculate sha256 * from the version field to the end of the file. * @version: version of this firmware. * @blocks: the first firmware block of the file */ struct fw_header { u8 checksum[32]; char version[RTL_VER_SIZE]; struct fw_block blocks[]; } __packed; enum rtl8152_fw_flags { FW_FLAGS_USB = 0, FW_FLAGS_PLA, FW_FLAGS_START, FW_FLAGS_STOP, FW_FLAGS_NC, FW_FLAGS_NC1, FW_FLAGS_NC2, FW_FLAGS_UC2, FW_FLAGS_UC, FW_FLAGS_SPEED_UP, FW_FLAGS_VER, }; enum rtl8152_fw_fixup_cmd { FW_FIXUP_AND = 0, FW_FIXUP_OR, FW_FIXUP_NOT, FW_FIXUP_XOR, }; struct fw_phy_set { __le16 addr; __le16 data; } __packed; struct fw_phy_speed_up { struct fw_block blk_hdr; __le16 fw_offset; __le16 version; __le16 fw_reg; __le16 reserved; char info[]; } __packed; struct fw_phy_ver { struct fw_block blk_hdr; struct fw_phy_set ver; __le32 reserved; } __packed; struct fw_phy_fixup { struct fw_block blk_hdr; struct fw_phy_set setting; __le16 bit_cmd; __le16 reserved; } __packed; struct fw_phy_union { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; struct fw_phy_set pre_set[2]; struct fw_phy_set bp[8]; struct fw_phy_set bp_en; u8 pre_num; u8 bp_num; char info[]; } __packed; /** * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. * The layout of the firmware block is: * <struct fw_mac> + <info> + <firmware data>. * @blk_hdr: firmware descriptor (type, length) * @fw_offset: offset of the firmware binary data. The start address of * the data would be the address of struct fw_mac + @fw_offset. * @fw_reg: the register to load the firmware. Depends on chip. * @bp_ba_addr: the register to write break point base address. Depends on * chip. * @bp_ba_value: break point base address. Depends on chip. * @bp_en_addr: the register to write break point enabled mask. Depends * on chip. * @bp_en_value: break point enabled mask. Depends on the firmware. * @bp_start: the start register of break points. Depends on chip. * @bp_num: the break point number which needs to be set for this firmware. * Depends on the firmware. * @bp: break points. Depends on firmware. 
* @reserved: reserved space (unused) * @fw_ver_reg: the register to store the fw version. * @fw_ver_data: the firmware version of the current type. * @info: additional information for debugging, and is followed by the * binary data of firmware. */ struct fw_mac { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; __le16 bp_ba_addr; __le16 bp_ba_value; __le16 bp_en_addr; __le16 bp_en_value; __le16 bp_start; __le16 bp_num; __le16 bp[16]; /* any value determined by firmware */ __le32 reserved; __le16 fw_ver_reg; u8 fw_ver_data; char info[]; } __packed; /** * struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START. * This is used to set patch key when loading the firmware of PHY. * @blk_hdr: firmware descriptor (type, length) * @key_reg: the register to write the patch key. * @key_data: patch key. * @reserved: reserved space (unused) */ struct fw_phy_patch_key { struct fw_block blk_hdr; __le16 key_reg; __le16 key_data; __le32 reserved; } __packed; /** * struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC. * The layout of the firmware block is: * <struct fw_phy_nc> + <info> + <firmware data>. * @blk_hdr: firmware descriptor (type, length) * @fw_offset: offset of the firmware binary data. The start address of * the data would be the address of struct fw_phy_nc + @fw_offset. * @fw_reg: the register to load the firmware. Depends on chip. * @ba_reg: the register to write the base address. Depends on chip. * @ba_data: base address. Depends on chip. * @patch_en_addr: the register of enabling patch mode. Depends on chip. * @patch_en_value: patch mode enabled mask. Depends on the firmware. * @mode_reg: the regitster of switching the mode. * @mode_pre: the mode needing to be set before loading the firmware. * @mode_post: the mode to be set when finishing to load the firmware. * @reserved: reserved space (unused) * @bp_start: the start register of break points. Depends on chip. * @bp_num: the break point number which needs to be set for this firmware. * Depends on the firmware. * @bp: break points. Depends on firmware. * @info: additional information for debugging, and is followed by the * binary data of firmware. */ struct fw_phy_nc { struct fw_block blk_hdr; __le16 fw_offset; __le16 fw_reg; __le16 ba_reg; __le16 ba_data; __le16 patch_en_addr; __le16 patch_en_value; __le16 mode_reg; __le16 mode_pre; __le16 mode_post; __le16 reserved; __le16 bp_start; __le16 bp_num; __le16 bp[4]; char info[]; } __packed; enum rtl_fw_type { RTL_FW_END = 0, RTL_FW_PLA, RTL_FW_USB, RTL_FW_PHY_START, RTL_FW_PHY_STOP, RTL_FW_PHY_NC, RTL_FW_PHY_FIXUP, RTL_FW_PHY_UNION_NC, RTL_FW_PHY_UNION_NC1, RTL_FW_PHY_UNION_NC2, RTL_FW_PHY_UNION_UC2, RTL_FW_PHY_UNION_UC, RTL_FW_PHY_UNION_MISC, RTL_FW_PHY_SPEED_UP, RTL_FW_PHY_VER, }; enum rtl_version { RTL_VER_UNKNOWN = 0, RTL_VER_01, RTL_VER_02, RTL_VER_03, RTL_VER_04, RTL_VER_05, RTL_VER_06, RTL_VER_07, RTL_VER_08, RTL_VER_09, RTL_TEST_01, RTL_VER_10, RTL_VER_11, RTL_VER_12, RTL_VER_13, RTL_VER_14, RTL_VER_15, RTL_VER_MAX }; enum tx_csum_stat { TX_CSUM_SUCCESS = 0, TX_CSUM_TSO, TX_CSUM_NONE }; #define RTL_ADVERTISED_10_HALF BIT(0) #define RTL_ADVERTISED_10_FULL BIT(1) #define RTL_ADVERTISED_100_HALF BIT(2) #define RTL_ADVERTISED_100_FULL BIT(3) #define RTL_ADVERTISED_1000_HALF BIT(4) #define RTL_ADVERTISED_1000_FULL BIT(5) #define RTL_ADVERTISED_2500_FULL BIT(6) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). * The RTL chips use a 64 element hash table based on the Ethernet CRC. 
*/ static const int multicast_filter_limit = 32; static unsigned int agg_buf_sz = 16384; #define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc)) /* If register access fails then we block access and issue a reset. If this * happens too many times in a row without a successful access then we stop * trying to reset and just leave access blocked. */ #define REGISTER_ACCESS_MAX_RESETS 3 static void rtl_set_inaccessible(struct r8152 *tp) { set_bit(RTL8152_INACCESSIBLE, &tp->flags); smp_mb__after_atomic(); } static void rtl_set_accessible(struct r8152 *tp) { clear_bit(RTL8152_INACCESSIBLE, &tp->flags); smp_mb__after_atomic(); } static int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, const char *msg_tag) { struct usb_device *udev = tp->udev; int ret; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; ret = usb_control_msg(udev, pipe, request, requesttype, value, index, data, size, USB_CTRL_GET_TIMEOUT); /* No need to issue a reset to report an error if the USB device got * unplugged; just return immediately. */ if (ret == -ENODEV) return ret; /* If the write was successful then we're done */ if (ret >= 0) { tp->reg_access_reset_count = 0; return ret; } dev_err(&udev->dev, "Failed to %s %d bytes at %#06x/%#06x (%d)\n", msg_tag, size, value, index, ret); /* Block all future register access until we reset. Much of the code * in the driver doesn't check for errors. Notably, many parts of the * driver do a read/modify/write of a register value without * confirming that the read succeeded. Writing back modified garbage * like this can fully wedge the adapter, requiring a power cycle. */ rtl_set_inaccessible(tp); /* If probe hasn't yet finished, then we'll request a retry of the * whole probe routine if we get any control transfer errors. We * never have to clear this bit since we free/reallocate the whole "tp" * structure if we retry probe. */ if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) { set_bit(PROBE_SHOULD_RETRY, &tp->flags); return ret; } /* Failing to access registers in pre-reset is not surprising since we * wouldn't be resetting if things were behaving normally. The register * access we do in pre-reset isn't truly mandatory--we're just reusing * the disable() function and trying to be nice by powering the * adapter down before resetting it. Thus, if we're in pre-reset, * we'll return right away and not try to queue up yet another reset. * We know the post-reset is already coming. 
	 */
	if (test_bit(IN_PRE_RESET, &tp->flags))
		return ret;

	if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) {
		usb_queue_reset_device(tp->intf);
		tp->reg_access_reset_count++;
	} else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) {
		dev_err(&udev->dev,
			"Tried to reset %d times; giving up.\n",
			REGISTER_ACCESS_MAX_RESETS);
	}

	return ret;
}

static int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size,
			 void *data)
{
	int ret;
	void *tmp;

	tmp = kmalloc(size, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = r8152_control_msg(tp, tp->pipe_ctrl_in,
				RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
				value, index, tmp, size, "read");

	if (ret < 0)
		memset(data, 0xff, size);
	else
		memcpy(data, tmp, size);

	kfree(tmp);

	return ret;
}

static int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size,
			 void *data)
{
	int ret;
	void *tmp;

	tmp = kmemdup(data, size, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = r8152_control_msg(tp, tp->pipe_ctrl_out,
				RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
				value, index, tmp, size, "write");

	kfree(tmp);

	return ret;
}

static void rtl_set_unplug(struct r8152 *tp)
{
	if (tp->udev->state == USB_STATE_NOTATTACHED)
		rtl_set_inaccessible(tp);
}

static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
			    void *data, u16 type)
{
	u16 limit = 64;
	int ret = 0;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return -ENODEV;

	/* both size and index must be 4-byte aligned */
	if ((size & 3) || !size || (index & 3) || !data)
		return -EPERM;

	if ((u32)index + (u32)size > 0xffff)
		return -EPERM;

	while (size) {
		if (size > limit) {
			ret = get_registers(tp, index, type, limit, data);
			if (ret < 0)
				break;

			index += limit;
			data += limit;
			size -= limit;
		} else {
			ret = get_registers(tp, index, type, size, data);
			if (ret < 0)
				break;

			index += size;
			data += size;
			size = 0;
			break;
		}
	}

	if (ret == -ENODEV)
		rtl_set_unplug(tp);

	return ret;
}

static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
			     u16 size, void *data, u16 type)
{
	int ret;
	u16 byteen_start, byteen_end, byen;
	u16 limit = 512;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return -ENODEV;

	/* both size and index must be 4-byte aligned */
	if ((size & 3) || !size || (index & 3) || !data)
		return -EPERM;

	if ((u32)index + (u32)size > 0xffff)
		return -EPERM;

	byteen_start = byteen & BYTE_EN_START_MASK;
	byteen_end = byteen & BYTE_EN_END_MASK;

	byen = byteen_start | (byteen_start << 4);

	/* Split the first DWORD if the byte_en is not 0xff */
	if (byen != BYTE_EN_DWORD) {
		ret = set_registers(tp, index, type | byen, 4, data);
		if (ret < 0)
			goto error1;

		index += 4;
		data += 4;
		size -= 4;
	}

	if (size) {
		byen = byteen_end | (byteen_end >> 4);

		/* Split the last DWORD if the byte_en is not 0xff */
		if (byen != BYTE_EN_DWORD)
			size -= 4;

		while (size) {
			if (size > limit) {
				ret = set_registers(tp, index,
						    type | BYTE_EN_DWORD,
						    limit, data);
				if (ret < 0)
					goto error1;

				index += limit;
				data += limit;
				size -= limit;
			} else {
				ret = set_registers(tp, index,
						    type | BYTE_EN_DWORD,
						    size, data);
				if (ret < 0)
					goto error1;

				index += size;
				data += size;
				size = 0;
				break;
			}
		}

		/* Set the last DWORD */
		if (byen != BYTE_EN_DWORD)
			ret = set_registers(tp, index, type | byen, 4, data);
	}

error1:
	if (ret == -ENODEV)
		rtl_set_unplug(tp);

	return ret;
}

static inline
int pla_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data)
{
	return generic_ocp_read(tp, index, size, data, MCU_TYPE_PLA);
}

static inline
int pla_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
{
	return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_PLA);
}

static
inline int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) { return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB); } static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index) { __le32 data; generic_ocp_read(tp, index, sizeof(data), &data, type); return __le32_to_cpu(data); } static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data) { __le32 tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type); } static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) { u32 data; __le32 tmp; u16 byen = BYTE_EN_WORD; u8 shift = index & 2; index &= ~3; byen <<= shift; generic_ocp_read(tp, index, sizeof(tmp), &tmp, type | byen); data = __le32_to_cpu(tmp); data >>= (shift * 8); data &= 0xffff; return (u16)data; } static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) { u32 mask = 0xffff; __le32 tmp; u16 byen = BYTE_EN_WORD; u8 shift = index & 2; data &= mask; if (index & 2) { byen <<= shift; mask <<= (shift * 8); data <<= (shift * 8); index &= ~3; } tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); } static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index) { u32 data; __le32 tmp; u8 shift = index & 3; index &= ~3; generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); data = __le32_to_cpu(tmp); data >>= (shift * 8); data &= 0xff; return (u8)data; } static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) { u32 mask = 0xff; __le32 tmp; u16 byen = BYTE_EN_BYTE; u8 shift = index & 3; data &= mask; if (index & 3) { byen <<= shift; mask <<= (shift * 8); data <<= (shift * 8); index &= ~3; } tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); } static u16 ocp_reg_read(struct r8152 *tp, u16 addr) { u16 ocp_base, ocp_index; ocp_base = addr & 0xf000; if (ocp_base != tp->ocp_base) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base); tp->ocp_base = ocp_base; } ocp_index = (addr & 0x0fff) | 0xb000; return ocp_read_word(tp, MCU_TYPE_PLA, ocp_index); } static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data) { u16 ocp_base, ocp_index; ocp_base = addr & 0xf000; if (ocp_base != tp->ocp_base) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base); tp->ocp_base = ocp_base; } ocp_index = (addr & 0x0fff) | 0xb000; ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data); } static inline void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value) { ocp_reg_write(tp, OCP_BASE_MII + reg_addr * 2, value); } static inline int r8152_mdio_read(struct r8152 *tp, u32 reg_addr) { return ocp_reg_read(tp, OCP_BASE_MII + reg_addr * 2); } static void sram_write(struct r8152 *tp, u16 addr, u16 data) { ocp_reg_write(tp, OCP_SRAM_ADDR, addr); ocp_reg_write(tp, OCP_SRAM_DATA, data); } static u16 sram_read(struct r8152 *tp, u16 addr) { ocp_reg_write(tp, OCP_SRAM_ADDR, addr); return ocp_reg_read(tp, OCP_SRAM_DATA); } static int read_mii_word(struct net_device *netdev, int phy_id, int reg) { struct r8152 *tp = netdev_priv(netdev); int ret; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; if (phy_id != R8152_PHY_ID) return -EINVAL; ret = r8152_mdio_read(tp, reg); return ret; } static void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) { struct r8152 *tp = netdev_priv(netdev); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (phy_id != R8152_PHY_ID) return; r8152_mdio_write(tp, reg, val); } static int r8152_submit_rx(struct r8152 *tp, struct 
rx_agg *agg, gfp_t mem_flags); static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising); static int __rtl8152_set_mac_address(struct net_device *netdev, struct sockaddr_storage *addr, bool in_resume) { struct r8152 *tp = netdev_priv(netdev); int ret = -EADDRNOTAVAIL; if (!is_valid_ether_addr(addr->__data)) goto out1; if (!in_resume) { ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out1; } mutex_lock(&tp->control); eth_hw_addr_set(netdev, addr->__data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->__data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); mutex_unlock(&tp->control); if (!in_resume) usb_autopm_put_interface(tp->intf); out1: return ret; } static int rtl8152_set_mac_address(struct net_device *netdev, void *p) { return __rtl8152_set_mac_address(netdev, p, false); } /* Devices containing proper chips can support a persistent * host system provided MAC address. * Examples of this are Dell TB15 and Dell WD15 docks */ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr_storage *ss) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; int ret = -EINVAL; u32 ocp_data; unsigned char buf[6]; char *mac_obj_name; acpi_object_type mac_obj_type; int mac_strlen; if (tp->lenovo_macpassthru) { mac_obj_name = "\\MACA"; mac_obj_type = ACPI_TYPE_STRING; mac_strlen = 0x16; } else { /* test for -AD variant of RTL8153 */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); if ((ocp_data & AD_MASK) == 0x1000) { /* test for MAC address pass-through bit */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE); if ((ocp_data & PASS_THRU_MASK) != 1) { netif_dbg(tp, probe, tp->netdev, "No efuse for RTL8153-AD MAC pass through\n"); return -ENODEV; } } else { /* test for RTL8153-BND and RTL8153-BD */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) { netif_dbg(tp, probe, tp->netdev, "Invalid variant for MAC pass through\n"); return -ENODEV; } } mac_obj_name = "\\_SB.AMAC"; mac_obj_type = ACPI_TYPE_BUFFER; mac_strlen = 0x17; } /* returns _AUXMAC_#AABBCCDDEEFF# */ status = acpi_evaluate_object(NULL, mac_obj_name, NULL, &buffer); obj = (union acpi_object *)buffer.pointer; if (!ACPI_SUCCESS(status)) return -ENODEV; if (obj->type != mac_obj_type || obj->string.length != mac_strlen) { netif_warn(tp, probe, tp->netdev, "Invalid buffer for pass-thru MAC addr: (%d, %d)\n", obj->type, obj->string.length); goto amacout; } if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 || strncmp(obj->string.pointer + 0x15, "#", 1) != 0) { netif_warn(tp, probe, tp->netdev, "Invalid header when reading pass-thru MAC addr\n"); goto amacout; } ret = hex2bin(buf, obj->string.pointer + 9, 6); if (!(ret == 0 && is_valid_ether_addr(buf))) { netif_warn(tp, probe, tp->netdev, "Invalid MAC for pass-thru MAC addr: %d, %pM\n", ret, buf); ret = -EINVAL; goto amacout; } memcpy(ss->__data, buf, 6); tp->netdev->addr_assign_type = NET_ADDR_STOLEN; netif_info(tp, probe, tp->netdev, "Using pass-thru MAC addr %pM\n", ss->__data); amacout: kfree(obj); return ret; } static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr_storage *ss) { struct net_device *dev = tp->netdev; int ret; ss->ss_family = dev->type; ret = eth_platform_get_mac_address(&tp->udev->dev, ss->__data); if (ret < 0) { if (tp->version == RTL_VER_01) { ret = pla_ocp_read(tp, PLA_IDR, 8, 
ss->__data); } else { /* if device doesn't support MAC pass through this will * be expected to be non-zero */ ret = vendor_mac_passthru_addr_read(tp, ss); if (ret < 0) ret = pla_ocp_read(tp, PLA_BACKUP, 8, ss->__data); } } if (ret < 0) { netif_err(tp, probe, dev, "Get ether addr fail\n"); } else if (!is_valid_ether_addr(ss->__data)) { netif_err(tp, probe, dev, "Invalid ether addr %pM\n", ss->__data); eth_hw_addr_random(dev); ether_addr_copy(ss->__data, dev->dev_addr); netif_info(tp, probe, dev, "Random ether addr %pM\n", ss->__data); return 0; } return ret; } static int set_ethernet_addr(struct r8152 *tp, bool in_resume) { struct net_device *dev = tp->netdev; struct sockaddr_storage ss; int ret; ret = determine_ethernet_addr(tp, &ss); if (ret < 0) return ret; if (tp->version == RTL_VER_01) eth_hw_addr_set(dev, ss.__data); else ret = __rtl8152_set_mac_address(dev, &ss, in_resume); return ret; } static void read_bulk_callback(struct urb *urb) { struct net_device *netdev; int status = urb->status; struct rx_agg *agg; struct r8152 *tp; unsigned long flags; agg = urb->context; if (!agg) return; tp = agg->context; if (!tp) return; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) return; netdev = tp->netdev; /* When link down, the driver would cancel all bulks. */ /* This avoid the re-submitting bulk */ if (!netif_carrier_ok(netdev)) return; usb_mark_last_busy(tp->udev); switch (status) { case 0: if (urb->actual_length < ETH_ZLEN) break; spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); napi_schedule(&tp->napi); return; case -ESHUTDOWN: rtl_set_unplug(tp); netif_device_detach(tp->netdev); return; case -EPROTO: urb->actual_length = 0; spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); set_bit(RX_EPROTO, &tp->flags); schedule_delayed_work(&tp->schedule, 1); return; case -ENOENT: return; /* the urb is in unlink state */ case -ETIME: if (net_ratelimit()) netdev_warn(netdev, "maybe reset is needed?\n"); break; default: if (net_ratelimit()) netdev_warn(netdev, "Rx status %d\n", status); break; } r8152_submit_rx(tp, agg, GFP_ATOMIC); } static void write_bulk_callback(struct urb *urb) { struct net_device_stats *stats; struct net_device *netdev; struct tx_agg *agg; struct r8152 *tp; unsigned long flags; int status = urb->status; agg = urb->context; if (!agg) return; tp = agg->context; if (!tp) return; netdev = tp->netdev; stats = &netdev->stats; if (status) { if (net_ratelimit()) netdev_warn(netdev, "Tx status %d\n", status); stats->tx_errors += agg->skb_num; } else { stats->tx_packets += agg->skb_num; stats->tx_bytes += agg->skb_len; } spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); spin_unlock_irqrestore(&tp->tx_lock, flags); usb_autopm_put_interface_async(tp->intf); if (!netif_carrier_ok(netdev)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) return; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!skb_queue_empty(&tp->tx_queue)) tasklet_schedule(&tp->tx_tl); } static void intr_callback(struct urb *urb) { struct r8152 *tp; __le16 *d; int status = urb->status; int res; tp = urb->context; if (!tp) return; if (!test_bit(WORK_ENABLE, &tp->flags)) return; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ESHUTDOWN: netif_device_detach(tp->netdev); fallthrough; case -ENOENT: case 
-EPROTO: netif_info(tp, intr, tp->netdev, "Stop submitting intr, status %d\n", status); return; case -EOVERFLOW: if (net_ratelimit()) netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); goto resubmit; /* -EPIPE: should clear the halt */ default: netif_info(tp, intr, tp->netdev, "intr status %d\n", status); goto resubmit; } d = urb->transfer_buffer; if (INTR_LINK & __le16_to_cpu(d[0])) { if (!netif_carrier_ok(tp->netdev)) { set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } else { if (netif_carrier_ok(tp->netdev)) { netif_stop_queue(tp->netdev); set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } resubmit: res = usb_submit_urb(urb, GFP_ATOMIC); if (res == -ENODEV) { rtl_set_unplug(tp); netif_device_detach(tp->netdev); } else if (res) { netif_err(tp, intr, tp->netdev, "can't resubmit intr, status %d\n", res); } } static inline void *rx_agg_align(void *data) { return (void *)ALIGN((uintptr_t)data, RX_ALIGN); } static inline void *tx_agg_align(void *data) { return (void *)ALIGN((uintptr_t)data, TX_ALIGN); } static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg) { list_del(&agg->info_list); usb_free_urb(agg->urb); put_page(agg->page); kfree(agg); atomic_dec(&tp->rx_count); } static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags) { struct net_device *netdev = tp->netdev; int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1; unsigned int order = get_order(tp->rx_buf_sz); struct rx_agg *rx_agg; unsigned long flags; rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node); if (!rx_agg) return NULL; rx_agg->page = alloc_pages(mflags | __GFP_COMP | __GFP_NOWARN, order); if (!rx_agg->page) goto free_rx; rx_agg->buffer = page_address(rx_agg->page); rx_agg->urb = usb_alloc_urb(0, mflags); if (!rx_agg->urb) goto free_buf; rx_agg->context = tp; INIT_LIST_HEAD(&rx_agg->list); INIT_LIST_HEAD(&rx_agg->info_list); spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&rx_agg->info_list, &tp->rx_info); spin_unlock_irqrestore(&tp->rx_lock, flags); atomic_inc(&tp->rx_count); return rx_agg; free_buf: __free_pages(rx_agg->page, order); free_rx: kfree(rx_agg); return NULL; } static void free_all_mem(struct r8152 *tp) { struct rx_agg *agg, *agg_next; unsigned long flags; int i; spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list) free_rx_agg(tp, agg); spin_unlock_irqrestore(&tp->rx_lock, flags); WARN_ON(atomic_read(&tp->rx_count)); for (i = 0; i < RTL8152_MAX_TX; i++) { usb_free_urb(tp->tx_info[i].urb); tp->tx_info[i].urb = NULL; kfree(tp->tx_info[i].buffer); tp->tx_info[i].buffer = NULL; tp->tx_info[i].head = NULL; } usb_free_urb(tp->intr_urb); tp->intr_urb = NULL; kfree(tp->intr_buff); tp->intr_buff = NULL; } static int alloc_all_mem(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct usb_interface *intf = tp->intf; struct usb_host_interface *alt = intf->cur_altsetting; struct usb_host_endpoint *ep_intr = alt->endpoint + 2; int node, i; node = netdev->dev.parent ? 
dev_to_node(netdev->dev.parent) : -1; spin_lock_init(&tp->rx_lock); spin_lock_init(&tp->tx_lock); INIT_LIST_HEAD(&tp->rx_info); INIT_LIST_HEAD(&tp->tx_free); INIT_LIST_HEAD(&tp->rx_done); skb_queue_head_init(&tp->tx_queue); skb_queue_head_init(&tp->rx_queue); atomic_set(&tp->rx_count, 0); for (i = 0; i < RTL8152_MAX_RX; i++) { if (!alloc_rx_agg(tp, GFP_KERNEL)) goto err1; } for (i = 0; i < RTL8152_MAX_TX; i++) { struct urb *urb; u8 *buf; buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; if (buf != tx_agg_align(buf)) { kfree(buf); buf = kmalloc_node(agg_buf_sz + TX_ALIGN, GFP_KERNEL, node); if (!buf) goto err1; } urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { kfree(buf); goto err1; } INIT_LIST_HEAD(&tp->tx_info[i].list); tp->tx_info[i].context = tp; tp->tx_info[i].urb = urb; tp->tx_info[i].buffer = buf; tp->tx_info[i].head = tx_agg_align(buf); list_add_tail(&tp->tx_info[i].list, &tp->tx_free); } tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!tp->intr_urb) goto err1; tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); if (!tp->intr_buff) goto err1; tp->intr_interval = (int)ep_intr->desc.bInterval; usb_fill_int_urb(tp->intr_urb, tp->udev, tp->pipe_intr, tp->intr_buff, INTBUFSIZE, intr_callback, tp, tp->intr_interval); return 0; err1: free_all_mem(tp); return -ENOMEM; } static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp) { struct tx_agg *agg = NULL; unsigned long flags; if (list_empty(&tp->tx_free)) return NULL; spin_lock_irqsave(&tp->tx_lock, flags); if (!list_empty(&tp->tx_free)) { struct list_head *cursor; cursor = tp->tx_free.next; list_del_init(cursor); agg = list_entry(cursor, struct tx_agg, list); } spin_unlock_irqrestore(&tp->tx_lock, flags); return agg; } /* r8152_csum_workaround() * The hw limits the value of the transport offset. When the offset is out of * range, calculate the checksum by sw. 
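 * (The offset limits are GTTCPHO_MAX for TSO and TCPHO_MAX for plain
 * checksum offload; see r8152_tx_csum() below.)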
*/ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb, struct sk_buff_head *list) { if (skb_shinfo(skb)->gso_size) { netdev_features_t features = tp->netdev->features; struct sk_buff *segs, *seg, *next; struct sk_buff_head seg_list; features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); segs = skb_gso_segment(skb, features); if (IS_ERR(segs) || !segs) goto drop; __skb_queue_head_init(&seg_list); skb_list_walk_safe(segs, seg, next) { skb_mark_not_on_list(seg); __skb_queue_tail(&seg_list, seg); } skb_queue_splice(&seg_list, list); dev_kfree_skb(skb); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb_checksum_help(skb) < 0) goto drop; __skb_queue_head(list, skb); } else { struct net_device_stats *stats; drop: stats = &tp->netdev->stats; stats->tx_dropped++; dev_kfree_skb(skb); } } static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) { if (skb_vlan_tag_present(skb)) { u32 opts2; opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb)); desc->opts2 |= cpu_to_le32(opts2); } } static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb) { u32 opts2 = le32_to_cpu(desc->opts2); if (opts2 & RX_VLAN_TAG) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); } static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb, u32 len) { u32 mss = skb_shinfo(skb)->gso_size; u32 opts1, opts2 = 0; int ret = TX_CSUM_SUCCESS; WARN_ON_ONCE(len > TX_LEN_MAX); opts1 = len | TX_FS | TX_LS; if (mss) { u32 transport_offset = (u32)skb_transport_offset(skb); if (transport_offset > GTTCPHO_MAX) { netif_warn(tp, tx_err, tp->netdev, "Invalid transport offset 0x%x for TSO\n", transport_offset); ret = TX_CSUM_TSO; goto unavailable; } switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts1 |= GTSENDV4; break; case htons(ETH_P_IPV6): if (skb_cow_head(skb, 0)) { ret = TX_CSUM_TSO; goto unavailable; } tcp_v6_gso_csum_prep(skb); opts1 |= GTSENDV6; break; default: WARN_ON_ONCE(1); break; } opts1 |= transport_offset << GTTCPHO_SHIFT; opts2 |= min(mss, MSS_MAX) << MSS_SHIFT; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u32 transport_offset = (u32)skb_transport_offset(skb); u8 ip_protocol; if (transport_offset > TCPHO_MAX) { netif_warn(tp, tx_err, tp->netdev, "Invalid transport offset 0x%x\n", transport_offset); ret = TX_CSUM_NONE; goto unavailable; } switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts2 |= IPV4_CS; ip_protocol = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): opts2 |= IPV6_CS; ip_protocol = ipv6_hdr(skb)->nexthdr; break; default: ip_protocol = IPPROTO_RAW; break; } if (ip_protocol == IPPROTO_TCP) opts2 |= TCP_CS; else if (ip_protocol == IPPROTO_UDP) opts2 |= UDP_CS; else WARN_ON_ONCE(1); opts2 |= transport_offset << TCPHO_SHIFT; } desc->opts2 = cpu_to_le32(opts2); desc->opts1 = cpu_to_le32(opts1); unavailable: return ret; } static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) { struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue; int remain, ret; u8 *tx_data; __skb_queue_head_init(&skb_head); spin_lock(&tx_queue->lock); skb_queue_splice_init(tx_queue, &skb_head); spin_unlock(&tx_queue->lock); tx_data = agg->head; agg->skb_num = 0; agg->skb_len = 0; remain = agg_buf_sz; while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) { struct tx_desc *tx_desc; struct sk_buff *skb; unsigned int len; skb = __skb_dequeue(&skb_head); if (!skb) break; len = skb->len + sizeof(*tx_desc); if (len > remain) { __skb_queue_head(&skb_head, skb); break; } tx_data = tx_agg_align(tx_data); 
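		/* Each entry in the aggregation buffer is laid out as
		 * <tx_desc><packet data>; tx_agg_align() above keeps every
		 * descriptor on a TX_ALIGN (4-byte) boundary before the
		 * next descriptor is written.
		 */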
tx_desc = (struct tx_desc *)tx_data; if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) { r8152_csum_workaround(tp, skb, &skb_head); continue; } rtl_tx_vlan_tag(tx_desc, skb); tx_data += sizeof(*tx_desc); len = skb->len; if (skb_copy_bits(skb, 0, tx_data, len) < 0) { struct net_device_stats *stats = &tp->netdev->stats; stats->tx_dropped++; dev_kfree_skb_any(skb); tx_data -= sizeof(*tx_desc); continue; } tx_data += len; agg->skb_len += len; agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; dev_kfree_skb_any(skb); remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); if (tp->dell_tb_rx_agg_bug) break; } if (!skb_queue_empty(&skb_head)) { spin_lock(&tx_queue->lock); skb_queue_splice(&skb_head, tx_queue); spin_unlock(&tx_queue->lock); } netif_tx_lock(tp->netdev); if (netif_queue_stopped(tp->netdev) && skb_queue_len(&tp->tx_queue) < tp->tx_qlen) netif_wake_queue(tp->netdev); netif_tx_unlock(tp->netdev); ret = usb_autopm_get_interface_async(tp->intf); if (ret < 0) goto out_tx_fill; usb_fill_bulk_urb(agg->urb, tp->udev, tp->pipe_out, agg->head, (int)(tx_data - (u8 *)agg->head), (usb_complete_t)write_bulk_callback, agg); ret = usb_submit_urb(agg->urb, GFP_ATOMIC); if (ret < 0) usb_autopm_put_interface_async(tp->intf); out_tx_fill: return ret; } static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) { u8 checksum = CHECKSUM_NONE; u32 opts2, opts3; if (!(tp->netdev->features & NETIF_F_RXCSUM)) goto return_result; opts2 = le32_to_cpu(rx_desc->opts2); opts3 = le32_to_cpu(rx_desc->opts3); if (opts2 & RD_IPV4_CS) { if (opts3 & IPF) checksum = CHECKSUM_NONE; else if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) checksum = CHECKSUM_UNNECESSARY; else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) checksum = CHECKSUM_UNNECESSARY; } else if (opts2 & RD_IPV6_CS) { if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) checksum = CHECKSUM_UNNECESSARY; else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) checksum = CHECKSUM_UNNECESSARY; } return_result: return checksum; } static inline bool rx_count_exceed(struct r8152 *tp) { return atomic_read(&tp->rx_count) > RTL8152_MAX_RX; } static inline int agg_offset(struct rx_agg *agg, void *addr) { return (int)(addr - agg->buffer); } static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags) { struct rx_agg *agg, *agg_next, *agg_free = NULL; unsigned long flags; spin_lock_irqsave(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) { if (page_count(agg->page) == 1) { if (!agg_free) { list_del_init(&agg->list); agg_free = agg; continue; } if (rx_count_exceed(tp)) { list_del_init(&agg->list); free_rx_agg(tp, agg); } break; } } spin_unlock_irqrestore(&tp->rx_lock, flags); if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending) agg_free = alloc_rx_agg(tp, mflags); return agg_free; } static int rx_bottom(struct r8152 *tp, int budget) { unsigned long flags; struct list_head *cursor, *next, rx_queue; int ret = 0, work_done = 0; struct napi_struct *napi = &tp->napi; if (!skb_queue_empty(&tp->rx_queue)) { while (work_done < budget) { struct sk_buff *skb = __skb_dequeue(&tp->rx_queue); struct net_device *netdev = tp->netdev; struct net_device_stats *stats = &netdev->stats; unsigned int pkt_len; if (!skb) break; pkt_len = skb->len; napi_gro_receive(napi, skb); work_done++; stats->rx_packets++; stats->rx_bytes += pkt_len; } } if (list_empty(&tp->rx_done) || work_done >= budget) goto out1; clear_bit(RX_EPROTO, &tp->flags); INIT_LIST_HEAD(&rx_queue); spin_lock_irqsave(&tp->rx_lock, flags); list_splice_init(&tp->rx_done, &rx_queue); 
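	/* The completed aggregates were just moved onto the local rx_queue
	 * list above, so they can be walked and resubmitted below without
	 * holding rx_lock for the whole receive pass.
	 */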
	spin_unlock_irqrestore(&tp->rx_lock, flags);

	list_for_each_safe(cursor, next, &rx_queue) {
		struct rx_desc *rx_desc;
		struct rx_agg *agg, *agg_free;
		int len_used = 0;
		struct urb *urb;
		u8 *rx_data;

		/* A USB bulk transfer may contain many packets, so the total
		 * number of packets may exceed the budget. Deal with all
		 * packets in the current bulk transfer, and stop handling
		 * further bulk transfers until the next schedule if the
		 * budget is exhausted.
		 */
		if (work_done >= budget)
			break;

		list_del_init(cursor);

		agg = list_entry(cursor, struct rx_agg, list);
		urb = agg->urb;
		if (urb->status != 0 || urb->actual_length < ETH_ZLEN)
			goto submit;

		agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);

		rx_desc = agg->buffer;
		rx_data = agg->buffer;
		len_used += sizeof(struct rx_desc);

		while (urb->actual_length > len_used) {
			struct net_device *netdev = tp->netdev;
			struct net_device_stats *stats = &netdev->stats;
			unsigned int pkt_len, rx_frag_head_sz, len;
			struct sk_buff *skb;
			bool use_frags;

			WARN_ON_ONCE(skb_queue_len(&tp->rx_queue) >= 1000);

			pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
			if (pkt_len < ETH_ZLEN)
				break;

			len_used += pkt_len;
			if (urb->actual_length < len_used)
				break;

			pkt_len -= ETH_FCS_LEN;
			len = pkt_len;
			rx_data += sizeof(struct rx_desc);

			if (!agg_free || tp->rx_copybreak > len)
				use_frags = false;
			else
				use_frags = true;

			if (use_frags) {
				/* If the budget is exhausted, the packet
				 * would be queued in the driver. That is,
				 * napi_gro_frags() wouldn't be called, so
				 * we couldn't use napi_get_frags().
				 */
				if (work_done >= budget) {
					rx_frag_head_sz = tp->rx_copybreak;
					skb = napi_alloc_skb(napi,
							     rx_frag_head_sz);
				} else {
					rx_frag_head_sz = 0;
					skb = napi_get_frags(napi);
				}
			} else {
				rx_frag_head_sz = 0;
				skb = napi_alloc_skb(napi, len);
			}

			if (!skb) {
				stats->rx_dropped++;
				goto find_next_rx;
			}

			skb->ip_summed = r8152_rx_csum(tp, rx_desc);
			rtl_rx_vlan_tag(rx_desc, skb);

			if (use_frags) {
				if (rx_frag_head_sz) {
					memcpy(skb->data, rx_data,
					       rx_frag_head_sz);
					skb_put(skb, rx_frag_head_sz);
					len -= rx_frag_head_sz;
					rx_data += rx_frag_head_sz;
					skb->protocol = eth_type_trans(skb,
								       netdev);
				}

				skb_add_rx_frag(skb, 0, agg->page,
						agg_offset(agg, rx_data),
						len, SKB_DATA_ALIGN(len));
				get_page(agg->page);
			} else {
				memcpy(skb->data, rx_data, len);
				skb_put(skb, len);
				skb->protocol = eth_type_trans(skb, netdev);
			}

			if (work_done < budget) {
				if (use_frags)
					napi_gro_frags(napi);
				else
					napi_gro_receive(napi, skb);

				work_done++;
				stats->rx_packets++;
				stats->rx_bytes += pkt_len;
			} else {
				__skb_queue_tail(&tp->rx_queue, skb);
			}

find_next_rx:
			rx_data = rx_agg_align(rx_data + len + ETH_FCS_LEN);
			rx_desc = (struct rx_desc *)rx_data;
			len_used = agg_offset(agg, rx_data);
			len_used += sizeof(struct rx_desc);
		}

		WARN_ON(!agg_free && page_count(agg->page) > 1);

		if (agg_free) {
			spin_lock_irqsave(&tp->rx_lock, flags);
			if (page_count(agg->page) == 1) {
				list_add(&agg_free->list, &tp->rx_used);
			} else {
				list_add_tail(&agg->list, &tp->rx_used);
				agg = agg_free;
				urb = agg->urb;
			}
			spin_unlock_irqrestore(&tp->rx_lock, flags);
		}

submit:
		if (!ret) {
			ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
		} else {
			urb->actual_length = 0;
			list_add_tail(&agg->list, next);
		}
	}

	/* Splice the remaining list back to rx_done for the next schedule */
	if (!list_empty(&rx_queue)) {
		spin_lock_irqsave(&tp->rx_lock, flags);
		list_splice(&rx_queue, &tp->rx_done);
		spin_unlock_irqrestore(&tp->rx_lock, flags);
	}

out1:
	return work_done;
}

static void tx_bottom(struct r8152 *tp)
{
	int res;

	do {
		struct net_device *netdev = tp->netdev;
		struct tx_agg *agg;

		if (skb_queue_empty(&tp->tx_queue))
			break;

		agg =
r8152_get_tx_agg(tp); if (!agg) break; res = r8152_tx_agg_fill(tp, agg); if (!res) continue; if (res == -ENODEV) { rtl_set_unplug(tp); netif_device_detach(netdev); } else { struct net_device_stats *stats = &netdev->stats; unsigned long flags; netif_warn(tp, tx_err, netdev, "failed tx_urb %d\n", res); stats->tx_dropped += agg->skb_num; spin_lock_irqsave(&tp->tx_lock, flags); list_add_tail(&agg->list, &tp->tx_free); spin_unlock_irqrestore(&tp->tx_lock, flags); } } while (res == 0); } static void bottom_half(struct tasklet_struct *t) { struct r8152 *tp = from_tasklet(tp, t, tx_tl); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) return; /* When link down, the driver would cancel all bulks. */ /* This avoid the re-submitting bulk */ if (!netif_carrier_ok(tp->netdev)) return; clear_bit(SCHEDULE_TASKLET, &tp->flags); tx_bottom(tp); } static int r8152_poll(struct napi_struct *napi, int budget) { struct r8152 *tp = container_of(napi, struct r8152, napi); int work_done; if (!budget) return 0; work_done = rx_bottom(tp, budget); if (work_done < budget) { if (!napi_complete_done(napi, work_done)) goto out; if (!list_empty(&tp->rx_done)) napi_schedule(napi); } out: return work_done; } static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) { int ret; /* The rx would be stopped, so skip submitting */ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev)) return 0; usb_fill_bulk_urb(agg->urb, tp->udev, tp->pipe_in, agg->buffer, tp->rx_buf_sz, (usb_complete_t)read_bulk_callback, agg); ret = usb_submit_urb(agg->urb, mem_flags); if (ret == -ENODEV) { rtl_set_unplug(tp); netif_device_detach(tp->netdev); } else if (ret) { struct urb *urb = agg->urb; unsigned long flags; urb->actual_length = 0; spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); netif_err(tp, rx_err, tp->netdev, "Couldn't submit rx[%p], ret = %d\n", agg, ret); napi_schedule(&tp->napi); } return ret; } static void rtl_drop_queued_tx(struct r8152 *tp) { struct net_device_stats *stats = &tp->netdev->stats; struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue; struct sk_buff *skb; if (skb_queue_empty(tx_queue)) return; __skb_queue_head_init(&skb_head); spin_lock_bh(&tx_queue->lock); skb_queue_splice_init(tx_queue, &skb_head); spin_unlock_bh(&tx_queue->lock); while ((skb = __skb_dequeue(&skb_head))) { dev_kfree_skb(skb); stats->tx_dropped++; } } static void rtl8152_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct r8152 *tp = netdev_priv(netdev); netif_warn(tp, tx_err, netdev, "Tx timeout\n"); usb_queue_reset_device(tp->intf); } static void rtl8152_set_rx_mode(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); if (netif_carrier_ok(netdev)) { set_bit(RTL8152_SET_RX_MODE, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } } static void _rtl8152_set_rx_mode(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); u32 mc_filter[2]; /* Multicast hash filter */ __le32 tmp[2]; u32 ocp_data; netif_stop_queue(netdev); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_data |= RCR_AB | RCR_APM; if (netdev->flags & IFF_PROMISC) { /* Unconditionally log net taps. 
*/ netif_notice(tp, link, netdev, "Promiscuous mode enabled\n"); ocp_data |= RCR_AM | RCR_AAP; mc_filter[1] = 0xffffffff; mc_filter[0] = 0xffffffff; } else if ((netdev->flags & IFF_MULTICAST && netdev_mc_count(netdev) > multicast_filter_limit) || (netdev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. */ ocp_data |= RCR_AM; mc_filter[1] = 0xffffffff; mc_filter[0] = 0xffffffff; } else { mc_filter[1] = 0; mc_filter[0] = 0; if (netdev->flags & IFF_MULTICAST) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, netdev) { int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); ocp_data |= RCR_AM; } } } tmp[0] = __cpu_to_le32(swab32(mc_filter[1])); tmp[1] = __cpu_to_le32(swab32(mc_filter[0])); pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); netif_wake_queue(netdev); } static netdev_features_t rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { u32 mss = skb_shinfo(skb)->gso_size; int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX; if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && skb_transport_offset(skb) > max_offset) features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) features &= ~NETIF_F_GSO_MASK; return features; } static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); skb_tx_timestamp(skb); skb_queue_tail(&tp->tx_queue, skb); if (!list_empty(&tp->tx_free)) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { set_bit(SCHEDULE_TASKLET, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } else { usb_mark_last_busy(tp->udev); tasklet_schedule(&tp->tx_tl); } } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { netif_stop_queue(netdev); } return NETDEV_TX_OK; } static void r8152b_reset_packet_filter(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC); ocp_data &= ~FMC_FCR_MCU_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data); ocp_data |= FMC_FCR_MCU_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data); } static void rtl8152_nic_reset(struct r8152 *tp) { u32 ocp_data; int i; switch (tp->version) { case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ocp_data &= ~CR_TE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data &= ~BMU_RESET_EP_IN; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data |= CDC_ECM_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ocp_data &= ~CR_RE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data |= BMU_RESET_EP_IN; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~CDC_ECM_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); break; default: ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) break; usleep_range(100, 400); } break; } } static void set_tx_qlen(struct r8152 *tp) { tp->tx_qlen = agg_buf_sz / (mtu_to_size(tp->netdev->mtu) + sizeof(struct tx_desc)); } 
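/* Worked example for set_tx_qlen() above (illustrative assumptions, not
 * values taken from this section): with a 16 KB tx aggregation buffer, an
 * 8-byte tx_desc, and a 1500-byte MTU, mtu_to_size() would yield
 * 1500 + VLAN_ETH_HLEN + ETH_FCS_LEN = 1522 bytes, giving
 * tx_qlen = 16384 / (1522 + 8) = 10 packets per aggregated bulk-out
 * transfer.
 */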
static inline u16 rtl8152_get_speed(struct r8152 *tp) { return ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); } static void rtl_eee_plus_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); if (enable) ocp_data |= EEEP_CR_EEEP_TX; else ocp_data &= ~EEEP_CR_EEEP_TX; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); } static void rtl_set_eee_plus(struct r8152 *tp) { if (rtl8152_get_speed(tp) & _10bps) rtl_eee_plus_en(tp, true); else rtl_eee_plus_en(tp, false); } static void rxdy_gated_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); if (enable) ocp_data |= RXDY_GATED_EN; else ocp_data &= ~RXDY_GATED_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); } static int rtl_start_rx(struct r8152 *tp) { struct rx_agg *agg, *agg_next; struct list_head tmp_list; unsigned long flags; int ret = 0, i = 0; INIT_LIST_HEAD(&tmp_list); spin_lock_irqsave(&tp->rx_lock, flags); INIT_LIST_HEAD(&tp->rx_done); INIT_LIST_HEAD(&tp->rx_used); list_splice_init(&tp->rx_info, &tmp_list); spin_unlock_irqrestore(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { INIT_LIST_HEAD(&agg->list); /* Only RTL8152_MAX_RX rx_agg buffers need to be submitted. */ if (++i > RTL8152_MAX_RX) { spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_used); spin_unlock_irqrestore(&tp->rx_lock, flags); } else if (unlikely(ret < 0)) { spin_lock_irqsave(&tp->rx_lock, flags); list_add_tail(&agg->list, &tp->rx_done); spin_unlock_irqrestore(&tp->rx_lock, flags); } else { ret = r8152_submit_rx(tp, agg, GFP_KERNEL); } } spin_lock_irqsave(&tp->rx_lock, flags); WARN_ON(!list_empty(&tp->rx_info)); list_splice(&tmp_list, &tp->rx_info); spin_unlock_irqrestore(&tp->rx_lock, flags); return ret; } static int rtl_stop_rx(struct r8152 *tp) { struct rx_agg *agg, *agg_next; struct list_head tmp_list; unsigned long flags; INIT_LIST_HEAD(&tmp_list); /* usb_kill_urb() can't be called in atomic context. Therefore, move the rx_info list to a temporary one; list_for_each_entry_safe() can then be used without holding the spin lock. */ spin_lock_irqsave(&tp->rx_lock, flags); list_splice_init(&tp->rx_info, &tmp_list); spin_unlock_irqrestore(&tp->rx_lock, flags); list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) { /* At least RTL8152_MAX_RX rx_agg buffers have a page_count of 1, so the other ones can be freed safely. */ if (page_count(agg->page) > 1) free_rx_agg(tp, agg); else usb_kill_urb(agg->urb); } /* Move the temporary list back to rx_info */ spin_lock_irqsave(&tp->rx_lock, flags); WARN_ON(!list_empty(&tp->rx_info)); list_splice(&tmp_list, &tp->rx_info); spin_unlock_irqrestore(&tp->rx_lock, flags); while (!skb_queue_empty(&tp->rx_queue)) dev_kfree_skb(__skb_dequeue(&tp->rx_queue)); return 0; } static void rtl_set_ifg(struct r8152 *tp, u16 speed) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); ocp_data &= ~IFG_MASK; if ((speed & (_10bps | _100bps)) && !(speed & FULL_DUP)) { ocp_data |= IFG_144NS; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ocp_data &= ~TX10MIDLE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); } else { ocp_data |= IFG_96NS; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR1, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ocp_data |= TX10MIDLE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); } } static inline void r8153b_rx_agg_chg_indicate(struct r8152 *tp) { ocp_write_byte(tp, MCU_TYPE_USB, USB_UPT_RXDMA_OWN, OWN_UPDATE | OWN_CLEAR); } static int rtl_enable(struct r8152 *tp) { u32 ocp_data; r8152b_reset_packet_filter(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); ocp_data |= CR_RE | CR_TE; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: break; default: r8153b_rx_agg_chg_indicate(tp); break; } rxdy_gated_en(tp, false); return 0; } static int rtl8152_enable(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); rtl_set_eee_plus(tp); return rtl_enable(tp); } static void r8153_set_rx_early_timeout(struct r8152 *tp) { u32 ocp_data = tp->coalesce / 8; switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, ocp_data); break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: /* The RTL8153B primarily uses USB_RX_EXTRA_AGGR_TMR for the rx timeout. USB_RX_EARLY_TIMEOUT is fixed to 128ns. 
*/ ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, 128 / 8); ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ocp_data); break; case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_TIMEOUT, 640 / 8); ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, ocp_data); break; default: break; } } static void r8153_set_rx_early_size(struct r8152 *tp) { u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 4); break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); break; case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data / 8); break; default: WARN_ON_ONCE(1); break; } } static int rtl8153_enable(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); rtl_set_ifg(tp, rtl8152_get_speed(tp)); switch (tp->version) { case RTL_VER_09: case RTL_VER_14: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); break; default: break; } return rtl_enable(tp); } static void rtl_disable(struct r8152 *tp) { u32 ocp_data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl_drop_queued_tx(tp); for (i = 0; i < RTL8152_MAX_TX; i++) usb_kill_urb(tp->tx_info[i].urb); rxdy_gated_en(tp, true); for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY) break; usleep_range(1000, 2000); } for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY) break; usleep_range(1000, 2000); } rtl_stop_rx(tp); rtl8152_nic_reset(tp); } static void r8152_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL); if (enable) ocp_data |= POWER_CUT; else ocp_data &= ~POWER_CUT; ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS); ocp_data &= ~RESUME_INDICATE; ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data); } static void rtl_rx_vlan_en(struct r8152 *tp, bool enable) { u32 ocp_data; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); if (enable) ocp_data |= CPCR_RX_VLAN; else ocp_data &= ~CPCR_RX_VLAN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); break; case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: default: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR1); if (enable) ocp_data |= OUTER_VLAN 
| INNER_VLAN; else ocp_data &= ~(OUTER_VLAN | INNER_VLAN); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR1, ocp_data); break; } } static int rtl8152_set_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = features ^ dev->features; struct r8152 *tp = netdev_priv(dev); int ret; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; mutex_lock(&tp->control); if (changed & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) rtl_rx_vlan_en(tp, true); else rtl_rx_vlan_en(tp, false); } mutex_unlock(&tp->control); usb_autopm_put_interface(tp->intf); out: return ret; } #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl_get_wol(struct r8152 *tp) { u32 ocp_data; u32 wolopts = 0; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); if (ocp_data & LINK_ON_WAKE_EN) wolopts |= WAKE_PHY; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); if (ocp_data & UWF_EN) wolopts |= WAKE_UCAST; if (ocp_data & BWF_EN) wolopts |= WAKE_BCAST; if (ocp_data & MWF_EN) wolopts |= WAKE_MCAST; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL); if (ocp_data & MAGIC_EN) wolopts |= WAKE_MAGIC; return wolopts; } static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) { u32 ocp_data; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data &= ~LINK_ON_WAKE_EN; if (wolopts & WAKE_PHY) ocp_data |= LINK_ON_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN); if (wolopts & WAKE_UCAST) ocp_data |= UWF_EN; if (wolopts & WAKE_BCAST) ocp_data |= BWF_EN; if (wolopts & WAKE_MCAST) ocp_data |= MWF_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL); ocp_data &= ~MAGIC_EN; if (wolopts & WAKE_MAGIC) ocp_data |= MAGIC_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data); if (wolopts & WAKE_ANY) device_set_wakeup_enable(&tp->udev->dev, true); else device_set_wakeup_enable(&tp->udev->dev, false); } static void r8153_mac_clk_speed_down(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); /* MAC clock speed down */ if (enable) ocp_data |= MAC_CLK_SPDWN_EN; else ocp_data &= ~MAC_CLK_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } static void r8156_mac_clk_spd(struct r8152 *tp, bool enable) { u32 ocp_data; /* MAC clock speed down */ if (enable) { /* aldps_spdwn_ratio, tp10_spdwn_ratio */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0x0403); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ocp_data &= ~EEE_SPDWN_RATIO_MASK; ocp_data |= MAC_CLK_SPDWN_EN | 0x03; /* eee_spdwn_ratio */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } else { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2); ocp_data &= ~MAC_CLK_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); } } static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; if (enable) memset(u1u2, 0xff, sizeof(u1u2)); else memset(u1u2, 0x00, sizeof(u1u2)); usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2); } static void r8153b_u1u2en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_LPM_CONFIG); if (enable) ocp_data |= LPM_U1U2_EN; else ocp_data &= 
~LPM_U1U2_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_LPM_CONFIG, ocp_data); } static void r8153_u2p3en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); if (enable) ocp_data |= U2P3_ENABLE; else ocp_data &= ~U2P3_ENABLE; ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); } static void r8153b_ups_flags(struct r8152 *tp) { u32 ups_flags = 0; if (tp->ups_info.green) ups_flags |= UPS_FLAGS_EN_GREEN; if (tp->ups_info.aldps) ups_flags |= UPS_FLAGS_EN_ALDPS; if (tp->ups_info.eee) ups_flags |= UPS_FLAGS_EN_EEE; if (tp->ups_info.flow_control) ups_flags |= UPS_FLAGS_EN_FLOW_CTR; if (tp->ups_info.eee_ckdiv) ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; if (tp->ups_info.eee_cmod_lv) ups_flags |= UPS_FLAGS_EEE_CMOD_LV_EN; if (tp->ups_info.r_tune) ups_flags |= UPS_FLAGS_R_TUNE; if (tp->ups_info._10m_ckdiv) ups_flags |= UPS_FLAGS_EN_10M_CKDIV; if (tp->ups_info.eee_plloff_100) ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; if (tp->ups_info.eee_plloff_giga) ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; if (tp->ups_info._250m_ckdiv) ups_flags |= UPS_FLAGS_250M_CKDIV; if (tp->ups_info.ctap_short_off) ups_flags |= UPS_FLAGS_CTAP_SHORT_DIS; switch (tp->ups_info.speed_duplex) { case NWAY_10M_HALF: ups_flags |= ups_flags_speed(1); break; case NWAY_10M_FULL: ups_flags |= ups_flags_speed(2); break; case NWAY_100M_HALF: ups_flags |= ups_flags_speed(3); break; case NWAY_100M_FULL: ups_flags |= ups_flags_speed(4); break; case NWAY_1000M_FULL: ups_flags |= ups_flags_speed(5); break; case FORCE_10M_HALF: ups_flags |= ups_flags_speed(6); break; case FORCE_10M_FULL: ups_flags |= ups_flags_speed(7); break; case FORCE_100M_HALF: ups_flags |= ups_flags_speed(8); break; case FORCE_100M_FULL: ups_flags |= ups_flags_speed(9); break; default: break; } ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } static void r8156_ups_flags(struct r8152 *tp) { u32 ups_flags = 0; if (tp->ups_info.green) ups_flags |= UPS_FLAGS_EN_GREEN; if (tp->ups_info.aldps) ups_flags |= UPS_FLAGS_EN_ALDPS; if (tp->ups_info.eee) ups_flags |= UPS_FLAGS_EN_EEE; if (tp->ups_info.flow_control) ups_flags |= UPS_FLAGS_EN_FLOW_CTR; if (tp->ups_info.eee_ckdiv) ups_flags |= UPS_FLAGS_EN_EEE_CKDIV; if (tp->ups_info._10m_ckdiv) ups_flags |= UPS_FLAGS_EN_10M_CKDIV; if (tp->ups_info.eee_plloff_100) ups_flags |= UPS_FLAGS_EEE_PLLOFF_100; if (tp->ups_info.eee_plloff_giga) ups_flags |= UPS_FLAGS_EEE_PLLOFF_GIGA; if (tp->ups_info._250m_ckdiv) ups_flags |= UPS_FLAGS_250M_CKDIV; switch (tp->ups_info.speed_duplex) { case FORCE_10M_HALF: ups_flags |= ups_flags_speed(0); break; case FORCE_10M_FULL: ups_flags |= ups_flags_speed(1); break; case FORCE_100M_HALF: ups_flags |= ups_flags_speed(2); break; case FORCE_100M_FULL: ups_flags |= ups_flags_speed(3); break; case NWAY_10M_HALF: ups_flags |= ups_flags_speed(4); break; case NWAY_10M_FULL: ups_flags |= ups_flags_speed(5); break; case NWAY_100M_HALF: ups_flags |= ups_flags_speed(6); break; case NWAY_100M_FULL: ups_flags |= ups_flags_speed(7); break; case NWAY_1000M_FULL: ups_flags |= ups_flags_speed(8); break; case NWAY_2500M_FULL: ups_flags |= ups_flags_speed(9); break; default: break; } switch (tp->ups_info.lite_mode) { case 1: ups_flags |= 0 << 5; break; case 2: ups_flags |= 2 << 5; break; case 0: default: ups_flags |= 1 << 5; break; } ocp_write_dword(tp, MCU_TYPE_USB, USB_UPS_FLAGS, ups_flags); } static void rtl_green_en(struct r8152 *tp, bool enable) { u16 data; data = sram_read(tp, SRAM_GREEN_CFG); if (enable) data |= GREEN_ETH_EN; else data &= ~GREEN_ETH_EN; sram_write(tp, 
SRAM_GREEN_CFG, data); tp->ups_info.green = enable; } static void r8153b_green_en(struct r8152 *tp, bool enable) { if (enable) { sram_write(tp, 0x8045, 0); /* 10M abiq&ldvbias */ sram_write(tp, 0x804d, 0x1222); /* 100M short abiq&ldvbias */ sram_write(tp, 0x805d, 0x0022); /* 1000M short abiq&ldvbias */ } else { sram_write(tp, 0x8045, 0x2444); /* 10M abiq&ldvbias */ sram_write(tp, 0x804d, 0x2444); /* 100M short abiq&ldvbias */ sram_write(tp, 0x805d, 0x2444); /* 1000M short abiq&ldvbias */ } rtl_green_en(tp, true); } static u16 r8153_phy_status(struct r8152 *tp, u16 desired) { u16 data; int i; for (i = 0; i < 500; i++) { data = ocp_reg_read(tp, OCP_PHY_STATUS); data &= PHY_STAT_MASK; if (desired) { if (data == desired) break; } else if (data == PHY_STAT_LAN_ON || data == PHY_STAT_PWRDN || data == PHY_STAT_EXT_INIT) { break; } msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } return data; } static void r8153b_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8153b_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { int i; for (i = 0; i < 500; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); } tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } } } static void r8153c_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8153b_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= UPS_FORCE_PWR_DOWN; ocp_data &= ~BIT(7); ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { int i; for (i = 0; i < 500; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); } tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= BIT(8); ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } } static void r8156_ups_en(struct r8152 *tp, bool enable) { u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) { r8156_ups_flags(tp); ocp_data |= UPS_EN | USP_PREWAKE | PHASE2_EN; ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data |= 
UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); switch (tp->version) { case RTL_VER_13: case RTL_VER_15: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPHY_XTAL); ocp_data &= ~OOBS_POLLING; ocp_write_byte(tp, MCU_TYPE_USB, USB_UPHY_XTAL, ocp_data); break; default: break; } } else { ocp_data &= ~(UPS_EN | USP_PREWAKE); ocp_write_byte(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~UPS_FORCE_PWR_DOWN; ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); if (ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0) & PCUT_STATUS) { tp->rtl_ops.hw_phy_cfg(tp); rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, tp->advertising); } } } static void r8153_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) ocp_data |= PWR_EN | PHASE2_EN; else ocp_data &= ~(PWR_EN | PHASE2_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } static void r8153b_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); if (enable) ocp_data |= PWR_EN | PHASE2_EN; else ocp_data &= ~PWR_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } static void r8153_queue_wake(struct r8152 *tp, bool enable) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG); if (enable) ocp_data |= UPCOMING_RUNTIME_D3; else ocp_data &= ~UPCOMING_RUNTIME_D3; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_INDICATE_FALG, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG); ocp_data &= ~LINK_CHG_EVENT; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_SUSPEND_FLAG, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); ocp_data &= ~LINK_CHANGE_FLAG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); } static bool rtl_can_wakeup(struct r8152 *tp) { struct usb_device *udev = tp->udev; return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP); } static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) { if (enable) { u32 ocp_data; __rtl_set_wol(tp, WAKE_ANY); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= LINK_OFF_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } else { u32 ocp_data; __rtl_set_wol(tp, tp->saved_wolopts); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data &= ~LINK_OFF_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); } } static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); } else { rtl_runtime_suspend_enable(tp, false); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } r8153_u1u2en(tp, true); } } static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, 
false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); r8153b_ups_en(tp, true); } else { r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } } static void rtl8153c_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); r8153c_ups_en(tp, true); } else { r8153c_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); r8153b_u1u2en(tp, true); } } static void rtl8156_runtime_enable(struct r8152 *tp, bool enable) { if (enable) { r8153_queue_wake(tp, true); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); rtl_runtime_suspend_enable(tp, true); } else { r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); r8153_u2p3en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } } static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); break; case RTL_VER_08: case RTL_VER_09: case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_14: case RTL_VER_15: default: /* Bits 0 ~ 7 are related to the Teredo settings. They are W1C (write 1 to clear), so write all ones to disable them. */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, 0xff); break; } ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE); ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); } static void rtl_reset_bmu(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET); ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT); ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT; ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); } /* Clear the bp to stop the firmware before loading a new one */ static void rtl_clear_bp(struct r8152 *tp, u16 type) { u16 bp[16] = {0}; u16 bp_num; switch (tp->version) { case RTL_VER_08: case RTL_VER_09: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: if (type == MCU_TYPE_USB) { ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0); bp_num = 16; break; } fallthrough; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_write_byte(tp, type, PLA_BP_EN, 0); fallthrough; case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: bp_num = 8; break; case RTL_VER_14: default: ocp_write_word(tp, type, USB_BP2_EN, 0); bp_num = 16; break; } generic_ocp_write(tp, PLA_BP_0, BYTE_EN_DWORD, bp_num << 1, bp, type); /* wait 3 ms to make sure the firmware is stopped */ usleep_range(3000, 6000); ocp_write_word(tp, type, PLA_BP_BA, 0); } static inline void rtl_reset_ocp_base(struct r8152 *tp) { tp->ocp_base = -1; } static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait) { u16 data, check; int i; data = ocp_reg_read(tp, OCP_PHY_PATCH_CMD); if (request) { data |= PATCH_REQUEST; check = 0; } else { data &= ~PATCH_REQUEST; check = PATCH_READY; } ocp_reg_write(tp, OCP_PHY_PATCH_CMD, data); for (i = 0; wait && i < 5000; i++) { u32 ocp_data; 
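/* Poll OCP_PHY_PATCH_STAT in roughly 1-2 ms steps, for at most 5000
 * iterations, until PATCH_READY reaches the expected state; give up
 * early if the device has become inaccessible.
 */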
if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; usleep_range(1000, 2000); ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT); if ((ocp_data & PATCH_READY) ^ check) break; } if (request && wait && !(ocp_reg_read(tp, OCP_PHY_PATCH_STAT) & PATCH_READY)) { dev_err(&tp->intf->dev, "PHY patch request fail\n"); rtl_phy_patch_request(tp, false, false); return -ETIME; } else { return 0; } } static void rtl_patch_key_set(struct r8152 *tp, u16 key_addr, u16 patch_key) { if (patch_key && key_addr) { sram_write(tp, key_addr, patch_key); sram_write(tp, SRAM_PHY_LOCK, PHY_PATCH_LOCK); } else if (key_addr) { u16 data; sram_write(tp, 0x0000, 0x0000); data = ocp_reg_read(tp, OCP_PHY_LOCK); data &= ~PATCH_LOCK; ocp_reg_write(tp, OCP_PHY_LOCK, data); sram_write(tp, key_addr, 0x0000); } else { WARN_ON_ONCE(1); } } static int rtl_pre_ram_code(struct r8152 *tp, u16 key_addr, u16 patch_key, bool wait) { if (rtl_phy_patch_request(tp, true, wait)) return -ETIME; rtl_patch_key_set(tp, key_addr, patch_key); return 0; } static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait) { rtl_patch_key_set(tp, key_addr, 0); rtl_phy_patch_request(tp, false, wait); return 0; } static bool rtl8152_is_fw_phy_speed_up_ok(struct r8152 *tp, struct fw_phy_speed_up *phy) { u16 fw_offset; u32 length; bool rc = false; switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_07: case RTL_VER_08: case RTL_VER_09: case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_14: goto out; case RTL_VER_13: case RTL_VER_15: default: break; } fw_offset = __le16_to_cpu(phy->fw_offset); length = __le32_to_cpu(phy->blk_hdr.length); if (fw_offset < sizeof(*phy) || length <= fw_offset) { dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= fw_offset; if (length & 3) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(phy->fw_reg) != 0x9A00) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_ver_ok(struct r8152 *tp, struct fw_phy_ver *ver) { bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } if (__le32_to_cpu(ver->blk_hdr.length) != sizeof(*ver)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(ver->ver.addr) != SRAM_GPHY_FW_VER) { dev_err(&tp->intf->dev, "invalid phy ver addr\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_fixup_ok(struct r8152 *tp, struct fw_phy_fixup *fix) { bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } if (__le32_to_cpu(fix->blk_hdr.length) != sizeof(*fix)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(fix->setting.addr) != OCP_PHY_PATCH_CMD || __le16_to_cpu(fix->setting.data) != BIT(7)) { dev_err(&tp->intf->dev, "invalid phy fixup\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_union_ok(struct r8152 *tp, struct fw_phy_union *phy) { u16 fw_offset; u32 length; bool rc = false; switch (tp->version) { case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: break; default: goto out; } fw_offset = __le16_to_cpu(phy->fw_offset); length = __le32_to_cpu(phy->blk_hdr.length); if (fw_offset < sizeof(*phy) || length <= fw_offset) { 
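/* fw_offset must point past the fixed part of the block header and
 * stay inside the block; otherwise the payload length computed after
 * this check would underflow.
 */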
dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= fw_offset; if (length & 1) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (phy->pre_num > 2) { dev_err(&tp->intf->dev, "invalid pre_num %d\n", phy->pre_num); goto out; } if (phy->bp_num > 8) { dev_err(&tp->intf->dev, "invalid bp_num %d\n", phy->bp_num); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_phy_nc_ok(struct r8152 *tp, struct fw_phy_nc *phy) { u32 length; u16 fw_offset, fw_reg, ba_reg, patch_en_addr, mode_reg, bp_start; bool rc = false; switch (tp->version) { case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: fw_reg = 0xa014; ba_reg = 0xa012; patch_en_addr = 0xa01a; mode_reg = 0xb820; bp_start = 0xa000; break; default: goto out; } fw_offset = __le16_to_cpu(phy->fw_offset); if (fw_offset < sizeof(*phy)) { dev_err(&tp->intf->dev, "fw_offset too small\n"); goto out; } length = __le32_to_cpu(phy->blk_hdr.length); if (length < fw_offset) { dev_err(&tp->intf->dev, "invalid fw_offset\n"); goto out; } length -= __le16_to_cpu(phy->fw_offset); if (!length || (length & 1)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(phy->fw_reg) != fw_reg) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } if (__le16_to_cpu(phy->ba_reg) != ba_reg) { dev_err(&tp->intf->dev, "invalid base address register\n"); goto out; } if (__le16_to_cpu(phy->patch_en_addr) != patch_en_addr) { dev_err(&tp->intf->dev, "invalid patch mode enabled register\n"); goto out; } if (__le16_to_cpu(phy->mode_reg) != mode_reg) { dev_err(&tp->intf->dev, "invalid register to switch the mode\n"); goto out; } if (__le16_to_cpu(phy->bp_start) != bp_start) { dev_err(&tp->intf->dev, "invalid start register of break point\n"); goto out; } if (__le16_to_cpu(phy->bp_num) > 4) { dev_err(&tp->intf->dev, "invalid break point number\n"); goto out; } rc = true; out: return rc; } static bool rtl8152_is_fw_mac_ok(struct r8152 *tp, struct fw_mac *mac) { u16 fw_reg, bp_ba_addr, bp_en_addr, bp_start, fw_offset; bool rc = false; u32 length, type; int i, max_bp; type = __le32_to_cpu(mac->blk_hdr.type); if (type == RTL_FW_PLA) { switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = 0; bp_start = PLA_BP_0; max_bp = 8; break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = PLA_BP_EN; bp_start = PLA_BP_0; max_bp = 8; break; case RTL_VER_14: fw_reg = 0xf800; bp_ba_addr = PLA_BP_BA; bp_en_addr = USB_BP2_EN; bp_start = PLA_BP_0; max_bp = 16; break; default: goto out; } } else if (type == RTL_FW_USB) { switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: fw_reg = 0xf800; bp_ba_addr = USB_BP_BA; bp_en_addr = USB_BP_EN; bp_start = USB_BP_0; max_bp = 8; break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_14: case RTL_VER_15: fw_reg = 0xe600; bp_ba_addr = USB_BP_BA; bp_en_addr = USB_BP2_EN; bp_start = USB_BP_0; max_bp = 16; break; case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: default: goto out; } } else { goto out; } fw_offset = __le16_to_cpu(mac->fw_offset); if (fw_offset < sizeof(*mac)) { dev_err(&tp->intf->dev, "fw_offset too small\n"); goto out; } length = __le32_to_cpu(mac->blk_hdr.length); if (length < fw_offset) { dev_err(&tp->intf->dev, "invalid 
fw_offset\n"); goto out; } length -= fw_offset; if (length < 4 || (length & 3)) { dev_err(&tp->intf->dev, "invalid block length\n"); goto out; } if (__le16_to_cpu(mac->fw_reg) != fw_reg) { dev_err(&tp->intf->dev, "invalid register to load firmware\n"); goto out; } if (__le16_to_cpu(mac->bp_ba_addr) != bp_ba_addr) { dev_err(&tp->intf->dev, "invalid base address register\n"); goto out; } if (__le16_to_cpu(mac->bp_en_addr) != bp_en_addr) { dev_err(&tp->intf->dev, "invalid enabled mask register\n"); goto out; } if (__le16_to_cpu(mac->bp_start) != bp_start) { dev_err(&tp->intf->dev, "invalid start register of break point\n"); goto out; } if (__le16_to_cpu(mac->bp_num) > max_bp) { dev_err(&tp->intf->dev, "invalid break point number\n"); goto out; } for (i = __le16_to_cpu(mac->bp_num); i < max_bp; i++) { if (mac->bp[i]) { dev_err(&tp->intf->dev, "unused bp%u is not zero\n", i); goto out; } } rc = true; out: return rc; } /* Verify the checksum for the firmware file. It is calculated from the version * field to the end of the file. Compare the result with the checksum field to * make sure the file is correct. */ static long rtl8152_fw_verify_checksum(struct r8152 *tp, struct fw_header *fw_hdr, size_t size) { u8 checksum[sizeof(fw_hdr->checksum)]; BUILD_BUG_ON(sizeof(checksum) != SHA256_DIGEST_SIZE); sha256(fw_hdr->version, size - sizeof(checksum), checksum); if (memcmp(fw_hdr->checksum, checksum, sizeof(checksum))) { dev_err(&tp->intf->dev, "checksum fail\n"); return -EFAULT; } return 0; } static long rtl8152_check_firmware(struct r8152 *tp, struct rtl_fw *rtl_fw) { const struct firmware *fw = rtl_fw->fw; struct fw_header *fw_hdr = (struct fw_header *)fw->data; unsigned long fw_flags = 0; long ret = -EFAULT; int i; if (fw->size < sizeof(*fw_hdr)) { dev_err(&tp->intf->dev, "file too small\n"); goto fail; } ret = rtl8152_fw_verify_checksum(tp, fw_hdr, fw->size); if (ret) goto fail; ret = -EFAULT; for (i = sizeof(*fw_hdr); i < fw->size;) { struct fw_block *block = (struct fw_block *)&fw->data[i]; u32 type; if ((i + sizeof(*block)) > fw->size) goto fail; type = __le32_to_cpu(block->type); switch (type) { case RTL_FW_END: if (__le32_to_cpu(block->length) != sizeof(*block)) goto fail; goto fw_end; case RTL_FW_PLA: if (test_bit(FW_FLAGS_PLA, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PLA firmware encountered"); goto fail; } if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check PLA firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_PLA, &fw_flags); break; case RTL_FW_USB: if (test_bit(FW_FLAGS_USB, &fw_flags)) { dev_err(&tp->intf->dev, "multiple USB firmware encountered"); goto fail; } if (!rtl8152_is_fw_mac_ok(tp, (struct fw_mac *)block)) { dev_err(&tp->intf->dev, "check USB firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_USB, &fw_flags); break; case RTL_FW_PHY_START: if (test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_START fail\n"); goto fail; } if (__le32_to_cpu(block->length) != sizeof(struct fw_phy_patch_key)) { dev_err(&tp->intf->dev, "Invalid length for PHY_START\n"); goto fail; } __set_bit(FW_FLAGS_START, &fw_flags); break; case RTL_FW_PHY_STOP: if (test_bit(FW_FLAGS_STOP, &fw_flags) || !test_bit(FW_FLAGS_START, &fw_flags)) { dev_err(&tp->intf->dev, "Check PHY_STOP fail\n"); goto fail; } if 
(__le32_to_cpu(block->length) != sizeof(*block)) { dev_err(&tp->intf->dev, "Invalid length for PHY_STOP\n"); goto fail; } __set_bit(FW_FLAGS_STOP, &fw_flags); break; case RTL_FW_PHY_NC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "check PHY_NC fail\n"); goto fail; } if (test_bit(FW_FLAGS_NC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_nc_ok(tp, (struct fw_phy_nc *)block)) { dev_err(&tp->intf->dev, "check PHY NC firmware failed\n"); goto fail; } __set_bit(FW_FLAGS_NC, &fw_flags); break; case RTL_FW_PHY_UNION_NC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY_UNION_NC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC failed\n"); goto fail; } __set_bit(FW_FLAGS_NC, &fw_flags); break; case RTL_FW_PHY_UNION_NC1: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC1 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC1, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC1 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC1 failed\n"); goto fail; } __set_bit(FW_FLAGS_NC1, &fw_flags); break; case RTL_FW_PHY_UNION_NC2: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_NC2 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_NC2, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY NC2 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_NC2 failed\n"); goto fail; } __set_bit(FW_FLAGS_NC2, &fw_flags); break; case RTL_FW_PHY_UNION_UC2: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_UC2 out of order\n"); goto fail; } if (test_bit(FW_FLAGS_UC2, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY UC2 encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_UC2 failed\n"); goto fail; } __set_bit(FW_FLAGS_UC2, &fw_flags); break; case RTL_FW_PHY_UNION_UC: if (!test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "PHY_UNION_UC out of order\n"); goto fail; } if (test_bit(FW_FLAGS_UC, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY UC encountered\n"); goto fail; } if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check PHY_UNION_UC failed\n"); goto fail; } __set_bit(FW_FLAGS_UC, &fw_flags); break; case RTL_FW_PHY_UNION_MISC: if (!rtl8152_is_fw_phy_union_ok(tp, (struct fw_phy_union *)block)) { dev_err(&tp->intf->dev, "check RTL_FW_PHY_UNION_MISC failed\n"); goto fail; } break; case RTL_FW_PHY_FIXUP: if 
(!rtl8152_is_fw_phy_fixup_ok(tp, (struct fw_phy_fixup *)block)) { dev_err(&tp->intf->dev, "check PHY fixup failed\n"); goto fail; } break; case RTL_FW_PHY_SPEED_UP: if (test_bit(FW_FLAGS_SPEED_UP, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY firmware encountered"); goto fail; } if (!rtl8152_is_fw_phy_speed_up_ok(tp, (struct fw_phy_speed_up *)block)) { dev_err(&tp->intf->dev, "check PHY speed up failed\n"); goto fail; } __set_bit(FW_FLAGS_SPEED_UP, &fw_flags); break; case RTL_FW_PHY_VER: if (test_bit(FW_FLAGS_START, &fw_flags) || test_bit(FW_FLAGS_NC, &fw_flags) || test_bit(FW_FLAGS_NC1, &fw_flags) || test_bit(FW_FLAGS_NC2, &fw_flags) || test_bit(FW_FLAGS_UC2, &fw_flags) || test_bit(FW_FLAGS_UC, &fw_flags) || test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "Invalid order to set PHY version\n"); goto fail; } if (test_bit(FW_FLAGS_VER, &fw_flags)) { dev_err(&tp->intf->dev, "multiple PHY version encountered"); goto fail; } if (!rtl8152_is_fw_phy_ver_ok(tp, (struct fw_phy_ver *)block)) { dev_err(&tp->intf->dev, "check PHY version failed\n"); goto fail; } __set_bit(FW_FLAGS_VER, &fw_flags); break; default: dev_warn(&tp->intf->dev, "Unknown type %u is found\n", type); break; } /* next block */ i += ALIGN(__le32_to_cpu(block->length), 8); } fw_end: if (test_bit(FW_FLAGS_START, &fw_flags) && !test_bit(FW_FLAGS_STOP, &fw_flags)) { dev_err(&tp->intf->dev, "without PHY_STOP\n"); goto fail; } return 0; fail: return ret; } static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy, bool wait) { u32 len; u8 *data; rtl_reset_ocp_base(tp); if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) { dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); return; } len = __le32_to_cpu(phy->blk_hdr.length); len -= __le16_to_cpu(phy->fw_offset); data = (u8 *)phy + __le16_to_cpu(phy->fw_offset); if (rtl_phy_patch_request(tp, true, wait)) return; while (len) { u32 ocp_data, size; int i; if (len < 2048) size = len; else size = 2048; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL); ocp_data |= GPHY_PATCH_DONE | BACKUP_RESTRORE; ocp_write_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL, ocp_data); generic_ocp_write(tp, __le16_to_cpu(phy->fw_reg), 0xff, size, data, MCU_TYPE_USB); data += size; len -= size; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL); ocp_data |= POL_GPHY_PATCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL, ocp_data); for (i = 0; i < 1000; i++) { if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & POL_GPHY_PATCH)) break; } if (i == 1000) { dev_err(&tp->intf->dev, "ram code speedup mode timeout\n"); break; } } rtl_reset_ocp_base(tp); rtl_phy_patch_request(tp, false, wait); if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version)) dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); else dev_err(&tp->intf->dev, "ram code speedup mode fail\n"); } static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver) { u16 ver_addr, ver; ver_addr = __le16_to_cpu(phy_ver->ver.addr); ver = __le16_to_cpu(phy_ver->ver.data); rtl_reset_ocp_base(tp); if (sram_read(tp, ver_addr) >= ver) { dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n"); return 0; } sram_write(tp, ver_addr, ver); dev_dbg(&tp->intf->dev, "PHY firmware version %x\n", ver); return ver; } static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix) { u16 addr, data; rtl_reset_ocp_base(tp); addr = __le16_to_cpu(fix->setting.addr); data = ocp_reg_read(tp, addr); switch (__le16_to_cpu(fix->bit_cmd)) { case 
FW_FIXUP_AND: data &= __le16_to_cpu(fix->setting.data); break; case FW_FIXUP_OR: data |= __le16_to_cpu(fix->setting.data); break; case FW_FIXUP_NOT: data &= ~__le16_to_cpu(fix->setting.data); break; case FW_FIXUP_XOR: data ^= __le16_to_cpu(fix->setting.data); break; default: return; } ocp_reg_write(tp, addr, data); dev_dbg(&tp->intf->dev, "applied ocp %x %x\n", addr, data); } static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *phy) { __le16 *data; u32 length; int i, num; rtl_reset_ocp_base(tp); num = phy->pre_num; for (i = 0; i < num; i++) sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr), __le16_to_cpu(phy->pre_set[i].data)); length = __le32_to_cpu(phy->blk_hdr.length); length -= __le16_to_cpu(phy->fw_offset); num = length / 2; data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); for (i = 0; i < num; i++) ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); num = phy->bp_num; for (i = 0; i < num; i++) sram_write(tp, __le16_to_cpu(phy->bp[i].addr), __le16_to_cpu(phy->bp[i].data)); if (phy->bp_num && phy->bp_en.addr) sram_write(tp, __le16_to_cpu(phy->bp_en.addr), __le16_to_cpu(phy->bp_en.data)); dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); } static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy) { u16 mode_reg, bp_index; u32 length, i, num; __le16 *data; rtl_reset_ocp_base(tp); mode_reg = __le16_to_cpu(phy->mode_reg); sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre)); sram_write(tp, __le16_to_cpu(phy->ba_reg), __le16_to_cpu(phy->ba_data)); length = __le32_to_cpu(phy->blk_hdr.length); length -= __le16_to_cpu(phy->fw_offset); num = length / 2; data = (__le16 *)((u8 *)phy + __le16_to_cpu(phy->fw_offset)); ocp_reg_write(tp, OCP_SRAM_ADDR, __le16_to_cpu(phy->fw_reg)); for (i = 0; i < num; i++) ocp_reg_write(tp, OCP_SRAM_DATA, __le16_to_cpu(data[i])); sram_write(tp, __le16_to_cpu(phy->patch_en_addr), __le16_to_cpu(phy->patch_en_value)); bp_index = __le16_to_cpu(phy->bp_start); num = __le16_to_cpu(phy->bp_num); for (i = 0; i < num; i++) { sram_write(tp, bp_index, __le16_to_cpu(phy->bp[i])); bp_index += 2; } sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_post)); dev_dbg(&tp->intf->dev, "successfully applied %s\n", phy->info); } static void rtl8152_fw_mac_apply(struct r8152 *tp, struct fw_mac *mac) { u16 bp_en_addr, type, fw_ver_reg; u32 length; u8 *data; switch (__le32_to_cpu(mac->blk_hdr.type)) { case RTL_FW_PLA: type = MCU_TYPE_PLA; break; case RTL_FW_USB: type = MCU_TYPE_USB; break; default: return; } fw_ver_reg = __le16_to_cpu(mac->fw_ver_reg); if (fw_ver_reg && ocp_read_byte(tp, MCU_TYPE_USB, fw_ver_reg) >= mac->fw_ver_data) { dev_dbg(&tp->intf->dev, "%s firmware has been the newest\n", type ? "PLA" : "USB"); return; } rtl_clear_bp(tp, type); /* Enable backup/restore of MACDBG. This is required after clearing PLA * break points and before applying the PLA firmware. */ if (tp->version == RTL_VER_04 && type == MCU_TYPE_PLA && !(ocp_read_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST) & DEBUG_OE)) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_PRE, DEBUG_LTSSM); ocp_write_word(tp, MCU_TYPE_PLA, PLA_MACDBG_POST, DEBUG_LTSSM); } length = __le32_to_cpu(mac->blk_hdr.length); length -= __le16_to_cpu(mac->fw_offset); data = (u8 *)mac; data += __le16_to_cpu(mac->fw_offset); if (generic_ocp_write(tp, __le16_to_cpu(mac->fw_reg), 0xff, length, data, type) < 0) { dev_err(&tp->intf->dev, "Write %s fw fail\n", type ? 
"PLA" : "USB"); return; } ocp_write_word(tp, type, __le16_to_cpu(mac->bp_ba_addr), __le16_to_cpu(mac->bp_ba_value)); if (generic_ocp_write(tp, __le16_to_cpu(mac->bp_start), BYTE_EN_DWORD, ALIGN(__le16_to_cpu(mac->bp_num) << 1, 4), mac->bp, type) < 0) { dev_err(&tp->intf->dev, "Write %s bp fail\n", type ? "PLA" : "USB"); return; } bp_en_addr = __le16_to_cpu(mac->bp_en_addr); if (bp_en_addr) ocp_write_word(tp, type, bp_en_addr, __le16_to_cpu(mac->bp_en_value)); if (fw_ver_reg) ocp_write_byte(tp, MCU_TYPE_USB, fw_ver_reg, mac->fw_ver_data); dev_dbg(&tp->intf->dev, "successfully applied %s\n", mac->info); } static void rtl8152_apply_firmware(struct r8152 *tp, bool power_cut) { struct rtl_fw *rtl_fw = &tp->rtl_fw; const struct firmware *fw; struct fw_header *fw_hdr; struct fw_phy_patch_key *key; u16 key_addr = 0; int i, patch_phy = 1; if (IS_ERR_OR_NULL(rtl_fw->fw)) return; fw = rtl_fw->fw; fw_hdr = (struct fw_header *)fw->data; if (rtl_fw->pre_fw) rtl_fw->pre_fw(tp); for (i = offsetof(struct fw_header, blocks); i < fw->size;) { struct fw_block *block = (struct fw_block *)&fw->data[i]; switch (__le32_to_cpu(block->type)) { case RTL_FW_END: goto post_fw; case RTL_FW_PLA: case RTL_FW_USB: rtl8152_fw_mac_apply(tp, (struct fw_mac *)block); break; case RTL_FW_PHY_START: if (!patch_phy) break; key = (struct fw_phy_patch_key *)block; key_addr = __le16_to_cpu(key->key_reg); rtl_pre_ram_code(tp, key_addr, __le16_to_cpu(key->key_data), !power_cut); break; case RTL_FW_PHY_STOP: if (!patch_phy) break; WARN_ON(!key_addr); rtl_post_ram_code(tp, key_addr, !power_cut); break; case RTL_FW_PHY_NC: rtl8152_fw_phy_nc_apply(tp, (struct fw_phy_nc *)block); break; case RTL_FW_PHY_VER: patch_phy = rtl8152_fw_phy_ver(tp, (struct fw_phy_ver *)block); break; case RTL_FW_PHY_UNION_NC: case RTL_FW_PHY_UNION_NC1: case RTL_FW_PHY_UNION_NC2: case RTL_FW_PHY_UNION_UC2: case RTL_FW_PHY_UNION_UC: case RTL_FW_PHY_UNION_MISC: if (patch_phy) rtl8152_fw_phy_union_apply(tp, (struct fw_phy_union *)block); break; case RTL_FW_PHY_FIXUP: if (patch_phy) rtl8152_fw_phy_fixup(tp, (struct fw_phy_fixup *)block); break; case RTL_FW_PHY_SPEED_UP: rtl_ram_code_speed_up(tp, (struct fw_phy_speed_up *)block, !power_cut); break; default: break; } i += ALIGN(__le32_to_cpu(block->length), 8); } post_fw: if (rtl_fw->post_fw) rtl_fw->post_fw(tp); rtl_reset_ocp_base(tp); strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE); dev_dbg(&tp->intf->dev, "load %s successfully\n", rtl_fw->version); } static void rtl8152_release_firmware(struct r8152 *tp) { struct rtl_fw *rtl_fw = &tp->rtl_fw; if (!IS_ERR_OR_NULL(rtl_fw->fw)) { release_firmware(rtl_fw->fw); rtl_fw->fw = NULL; } } static int rtl8152_request_firmware(struct r8152 *tp) { struct rtl_fw *rtl_fw = &tp->rtl_fw; long rc; if (rtl_fw->fw || !rtl_fw->fw_name) { dev_info(&tp->intf->dev, "skip request firmware\n"); rc = 0; goto result; } rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, &tp->intf->dev); if (rc < 0) goto result; rc = rtl8152_check_firmware(tp, rtl_fw); if (rc < 0) release_firmware(rtl_fw->fw); result: if (rc) { rtl_fw->fw = ERR_PTR(rc); dev_warn(&tp->intf->dev, "unable to load firmware patch %s (%ld)\n", rtl_fw->fw_name, rc); } return rc; } static void r8152_aldps_en(struct r8152 *tp, bool enable) { if (enable) { ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | LINKENA | DIS_SDSAVE); } else { ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE); msleep(20); } } static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) { ocp_reg_write(tp, OCP_EEE_AR, 
FUN_ADDR | dev); ocp_reg_write(tp, OCP_EEE_DATA, reg); ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); } static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) { u16 data; r8152_mmd_indirect(tp, dev, reg); data = ocp_reg_read(tp, OCP_EEE_DATA); ocp_reg_write(tp, OCP_EEE_AR, 0x0000); return data; } static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) { r8152_mmd_indirect(tp, dev, reg); ocp_reg_write(tp, OCP_EEE_DATA, data); ocp_reg_write(tp, OCP_EEE_AR, 0x0000); } static void r8152_eee_en(struct r8152 *tp, bool enable) { u16 config1, config2, config3; u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask; config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2); config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask; if (enable) { ocp_data |= EEE_RX_EN | EEE_TX_EN; config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN; config1 |= sd_rise_time(1); config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN; config3 |= fast_snr(42); } else { ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN); config1 |= sd_rise_time(7); config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN); config3 |= fast_snr(511); } ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); ocp_reg_write(tp, OCP_EEE_CONFIG1, config1); ocp_reg_write(tp, OCP_EEE_CONFIG2, config2); ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); } static void r8153_eee_en(struct r8152 *tp, bool enable) { u32 ocp_data; u16 config; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); config = ocp_reg_read(tp, OCP_EEE_CFG); if (enable) { ocp_data |= EEE_RX_EN | EEE_TX_EN; config |= EEE10_EN; } else { ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); config &= ~EEE10_EN; } ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); ocp_reg_write(tp, OCP_EEE_CFG, config); tp->ups_info.eee = enable; } static void r8156_eee_en(struct r8152 *tp, bool enable) { u16 config; r8153_eee_en(tp, enable); config = ocp_reg_read(tp, OCP_EEE_ADV2); if (enable) config |= MDIO_EEE_2_5GT; else config &= ~MDIO_EEE_2_5GT; ocp_reg_write(tp, OCP_EEE_ADV2, config); } static void rtl_eee_enable(struct r8152 *tp, bool enable) { switch (tp->version) { case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: if (enable) { r8152_eee_en(tp, true); r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, tp->eee_adv); } else { r8152_eee_en(tp, false); r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); } break; case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: if (enable) { r8153_eee_en(tp, true); ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); } else { r8153_eee_en(tp, false); ocp_reg_write(tp, OCP_EEE_ADV, 0); } break; case RTL_VER_10: case RTL_VER_11: case RTL_VER_12: case RTL_VER_13: case RTL_VER_15: if (enable) { r8156_eee_en(tp, true); ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv); } else { r8156_eee_en(tp, false); ocp_reg_write(tp, OCP_EEE_ADV, 0); } break; default: break; } } static void r8152b_enable_fc(struct r8152 *tp) { u16 anar; anar = r8152_mdio_read(tp, MII_ADVERTISE); anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; r8152_mdio_write(tp, MII_ADVERTISE, anar); tp->ups_info.flow_control = true; } static void rtl8152_disable(struct r8152 *tp) { r8152_aldps_en(tp, false); rtl_disable(tp); r8152_aldps_en(tp, true); } static void r8152b_hw_phy_cfg(struct r8152 *tp) { rtl8152_apply_firmware(tp, false); rtl_eee_enable(tp, tp->eee_en); r8152_aldps_en(tp, true); r8152b_enable_fc(tp); 
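	/* Note: PHY_RESET is only flagged below; the actual BMCR_RESET is
	 * issued later from rtl8152_set_speed(), which also polls until the
	 * reset bit clears.
	 */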
set_bit(PHY_RESET, &tp->flags); } static void wait_oob_link_list_ready(struct r8152 *tp) { u32 ocp_data; int i; for (i = 0; i < 1000; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; usleep_range(1000, 2000); } } static void r8156b_wait_loading_flash(struct r8152 *tp) { if ((ocp_read_word(tp, MCU_TYPE_PLA, PLA_GPHY_CTRL) & GPHY_FLASH) && !(ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & BYPASS_FLASH)) { int i; for (i = 0; i < 100; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; if (ocp_read_word(tp, MCU_TYPE_USB, USB_GPHY_CTRL) & GPHY_PATCH_DONE) break; usleep_range(1000, 2000); } } } static void r8152b_exit_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); if (tp->udev->speed == USB_SPEED_FULL || tp->udev->speed == USB_SPEED_LOW) { /* rx share fifo credit near full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_FULL); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_FULL); } else { /* rx share fifo credit near full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_HIGH); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_HIGH); } /* TX share fifo free credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH); ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA, TEST_MODE_DISABLE | TX_SIZE_ADJUST1); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ocp_data |= TCR0_AUTO_FIFO; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data); } static void r8152b_enter_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); rtl_disable(tp); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR); ocp_data |= 
		    ALDPS_PROXY_MODE;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data);

	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
	ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);

	rxdy_gated_en(tp, false);

	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
	ocp_data |= RCR_APM | RCR_AM | RCR_AB;
	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}

static int r8153_pre_firmware_1(struct r8152 *tp)
{
	int i;

	/* Wait until the WTD timer is ready. It would take at most 104 ms. */
	for (i = 0; i < 104; i++) {
		u32 ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_WDT1_CTRL);

		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
			return -ENODEV;
		if (!(ocp_data & WTD1_EN))
			break;
		usleep_range(1000, 2000);
	}

	return 0;
}

static int r8153_post_firmware_1(struct r8152 *tp)
{
	/* set USB_BP_4 to support USB_SPEED_SUPER only */
	if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER)
		ocp_write_word(tp, MCU_TYPE_USB, USB_BP_4, BP4_SUPER_ONLY);

	/* reset UPHY timer to 36 ms */
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16);

	return 0;
}

static int r8153_pre_firmware_2(struct r8152 *tp)
{
	u32 ocp_data;

	r8153_pre_firmware_1(tp);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0);
	ocp_data &= ~FW_FIX_SUSPEND;
	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data);

	return 0;
}

static int r8153_post_firmware_2(struct r8152 *tp)
{
	u32 ocp_data;

	/* enable bp0 if only USB_SPEED_SUPER is supported */
	if (ocp_read_byte(tp, MCU_TYPE_USB, USB_CSTMR) & FORCE_SUPER) {
		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN);
		ocp_data |= BIT(0);
		ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data);
	}

	/* reset UPHY timer to 36 ms */
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_UPHY_TIMER, 36000 / 16);

	/* enable U3P3 check, set the counter to 4 */
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, U3P3_CHECK_EN | 4);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0);
	ocp_data |= FW_FIX_SUSPEND;
	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN0, ocp_data);

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
	ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);

	return 0;
}

static int r8153_post_firmware_3(struct r8152 *tp)
{
	u32 ocp_data;

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY);
	ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1);
	ocp_data |= FW_IP_RESET_EN;
	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data);

	return 0;
}

static int r8153b_pre_firmware_1(struct r8152 *tp)
{
	/* enable fc timer and set timer to 1 second.
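	 * The value written below is (1000 / 8), which suggests the timer
	 * counts in 8 ms units: 125 ticks * 8 ms = 1 second. The unit is
	 * inferred from the expression, not from documentation.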
*/ ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER, CTRL_TIMER_EN | (1000 / 8)); return 0; } static int r8153b_post_firmware_1(struct r8152 *tp) { u32 ocp_data; /* enable bp0 for RTL8153-BND */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); if (ocp_data & BND_MASK) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BP_EN); ocp_data |= BIT(0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_EN, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); ocp_data |= FLOW_CTRL_PATCH_OPT; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ocp_data |= FW_IP_RESET_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); return 0; } static int r8153c_post_firmware_1(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL); ocp_data |= FLOW_CTRL_PATCH_2; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return 0; } static int r8156a_post_firmware_1(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1); ocp_data |= FW_IP_RESET_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_FIX_EN1, ocp_data); /* Modify U3PHY parameter for compatibility issue */ ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4026840e); ocp_write_dword(tp, MCU_TYPE_USB, USB_UPHY3_MDCMDIO, 0x4001acc9); return 0; } static void r8153_aldps_en(struct r8152 *tp, bool enable) { u16 data; data = ocp_reg_read(tp, OCP_POWER_CFG); if (enable) { data |= EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); } else { int i; data &= ~EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); for (i = 0; i < 20; i++) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; usleep_range(1000, 2000); if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100) break; } } tp->ups_info.aldps = enable; } static void r8153_hw_phy_cfg(struct r8152 *tp) { u32 ocp_data; u16 data; /* disable ALDPS before updating the PHY parameters */ r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ rtl_eee_enable(tp, false); rtl8152_apply_firmware(tp, false); if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); data &= ~CTAP_SHORT_EN; ocp_reg_write(tp, OCP_EEE_CFG, data); } data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); data = ocp_reg_read(tp, OCP_DOWN_SPEED); data |= EN_10M_BGOFF; ocp_reg_write(tp, OCP_DOWN_SPEED, data); data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EN_10M_PLLOFF; ocp_reg_write(tp, OCP_POWER_CFG, data); sram_write(tp, SRAM_IMPEDANCE, 0x0b13); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); /* Enable LPF corner auto tune */ sram_write(tp, SRAM_LPF_CFG, 0xf70f); /* Adjust 10M Amplitude */ sram_write(tp, SRAM_10M_AMP1, 0x00af); sram_write(tp, SRAM_10M_AMP2, 0x0208); if (tp->eee_en) rtl_eee_enable(tp, true); r8153_aldps_en(tp, true); r8152b_enable_fc(tp); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } set_bit(PHY_RESET, &tp->flags); } static u32 r8152_efuse_read(struct r8152 *tp, u8 addr) { u32 ocp_data; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EFUSE_CMD, 
		       EFUSE_READ_CMD | addr);
	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EFUSE_CMD);
	ocp_data = (ocp_data & EFUSE_DATA_BIT16) << 9;	/* data of bit16 */
	ocp_data |= ocp_read_word(tp, MCU_TYPE_PLA, PLA_EFUSE_DATA);

	return ocp_data;
}

static void r8153b_hw_phy_cfg(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
	if (ocp_data & PCUT_STATUS) {
		ocp_data &= ~PCUT_STATUS;
		ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
	}

	/* disable ALDPS before updating the PHY parameters */
	r8153_aldps_en(tp, false);

	/* disable EEE before updating the PHY parameters */
	rtl_eee_enable(tp, false);

	/* U1/U2/L1 idle timer. 500 us */
	ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);

	data = r8153_phy_status(tp, 0);

	switch (data) {
	case PHY_STAT_PWRDN:
	case PHY_STAT_EXT_INIT:
		rtl8152_apply_firmware(tp, true);

		data = r8152_mdio_read(tp, MII_BMCR);
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
		break;
	case PHY_STAT_LAN_ON:
	default:
		rtl8152_apply_firmware(tp, false);
		break;
	}

	r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));

	data = sram_read(tp, SRAM_GREEN_CFG);
	data |= R_TUNE_EN;
	sram_write(tp, SRAM_GREEN_CFG, data);
	data = ocp_reg_read(tp, OCP_NCTL_CFG);
	data |= PGA_RETURN_EN;
	ocp_reg_write(tp, OCP_NCTL_CFG, data);

	/* ADC Bias Calibration:
	 * read efuse offset 0x7d to get a 17-bit value. Remove the dummy/fake
	 * bit (bit3) to rebuild the real 16-bit data. Write the data to the
	 * ADC ioffset.
	 */
	ocp_data = r8152_efuse_read(tp, 0x7d);
	data = (u16)(((ocp_data & 0x1fff0) >> 1) | (ocp_data & 0x7));
	if (data != 0xffff)
		ocp_reg_write(tp, OCP_ADC_IOFFSET, data);

	/* ups mode tx-link-pulse timing adjustment:
	 * rg_saw_cnt = OCP reg 0xC426 Bit[13:0]
	 * swr_cnt_1ms_ini = 16000000 / rg_saw_cnt
	 * (e.g. a hypothetical rg_saw_cnt of 8000 would give
	 * swr_cnt_1ms_ini = 16000000 / 8000 = 2000)
	 */
	ocp_data = ocp_reg_read(tp, 0xc426);
	ocp_data &= 0x3fff;
	if (ocp_data) {
		u32 swr_cnt_1ms_ini;

		swr_cnt_1ms_ini = (16000000 / ocp_data) & SAW_CNT_1MS_MASK;
		ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG);
		ocp_data = (ocp_data & ~SAW_CNT_1MS_MASK) | swr_cnt_1ms_ini;
		ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CFG, ocp_data);
	}

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
	ocp_data |= PFM_PWM_SWITCH;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);

	/* Advance EEE */
	if (!rtl_phy_patch_request(tp, true, true)) {
		data = ocp_reg_read(tp, OCP_POWER_CFG);
		data |= EEE_CLKDIV_EN;
		ocp_reg_write(tp, OCP_POWER_CFG, data);
		tp->ups_info.eee_ckdiv = true;

		data = ocp_reg_read(tp, OCP_DOWN_SPEED);
		data |= EN_EEE_CMODE | EN_EEE_1000 | EN_10M_CLKDIV;
		ocp_reg_write(tp, OCP_DOWN_SPEED, data);
		tp->ups_info.eee_cmod_lv = true;
		tp->ups_info._10m_ckdiv = true;
		tp->ups_info.eee_plloff_giga = true;

		ocp_reg_write(tp, OCP_SYSCLK_CFG, 0);
		ocp_reg_write(tp, OCP_SYSCLK_CFG, clk_div_expo(5));
		tp->ups_info._250m_ckdiv = true;

		rtl_phy_patch_request(tp, false, true);
	}

	if (tp->eee_en)
		rtl_eee_enable(tp, true);

	r8153_aldps_en(tp, true);
	r8152b_enable_fc(tp);

	set_bit(PHY_RESET, &tp->flags);
}

static void r8153c_hw_phy_cfg(struct r8152 *tp)
{
	r8153b_hw_phy_cfg(tp);

	tp->ups_info.r_tune = true;
}

static void rtl8153_change_mtu(struct r8152 *tp)
{
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu));
	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
}

static void r8153_first_init(struct r8152 *tp)
{
	u32 ocp_data;

	rxdy_gated_en(tp, true);
	r8153_teredo_off(tp);

	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
	ocp_data &= ~RCR_ACPT_ALL;
	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);

	rtl8152_nic_reset(tp);
	rtl_reset_bmu(tp);

	ocp_data =
ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8153_change_mtu(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); ocp_data |= TCR0_AUTO_FIFO; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); /* TX share fifo free credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); } static void r8153_enter_oob(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); /* RX FIFO settings for OOB */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); rtl_disable(tp); rtl_reset_bmu(tp); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: case RTL_VER_06: ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); ocp_data &= ~TEREDO_WAKE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); break; case RTL_VER_08: case RTL_VER_09: case RTL_VER_14: /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. Set them to all 1 to clear them. */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); break; default: break; } rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data |= RCR_APM | RCR_AM | RCR_AB; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } static void rtl8153_disable(struct r8152 *tp) { r8153_aldps_en(tp, false); rtl_disable(tp); rtl_reset_bmu(tp); r8153_aldps_en(tp, true); } static u32 fc_pause_on_auto(struct r8152 *tp) { return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024); } static u32 fc_pause_off_auto(struct r8152 *tp) { return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024); } static void r8156_fc_parameter(struct r8152 *tp) { u32 pause_on = tp->fc_pause_on ? 
tp->fc_pause_on : fc_pause_on_auto(tp); u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16); } static int rtl8156_enable(struct r8152 *tp) { u32 ocp_data; u16 speed; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; r8156_fc_parameter(tp); set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); speed = rtl8152_get_speed(tp); rtl_set_ifg(tp, speed); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); if (speed & _2500bps) ocp_data &= ~IDLE_SPDWN_EN; else ocp_data |= IDLE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); if (speed & _1000bps) ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x11); else if (speed & _500bps) ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS, 0x3d); if (tp->udev->speed == USB_SPEED_HIGH) { /* USB 0xb45e[3:0] l1_nyet_hird */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); ocp_data &= ~0xf; if (is_flow_control(speed)) ocp_data |= 0xf; else ocp_data |= 0x1; ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return rtl_enable(tp); } static void rtl8156_disable(struct r8152 *tp) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 0); rtl8153_disable(tp); } static int rtl8156b_enable(struct r8152 *tp) { u32 ocp_data; u16 speed; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); rtl_set_eee_plus(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM); ocp_data &= ~RX_AGGR_NUM_MASK; ocp_write_word(tp, MCU_TYPE_USB, USB_RX_AGGR_NUM, ocp_data); r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); speed = rtl8152_get_speed(tp); rtl_set_ifg(tp, speed); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); if (speed & _2500bps) ocp_data &= ~IDLE_SPDWN_EN; else ocp_data |= IDLE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); if (tp->udev->speed == USB_SPEED_HIGH) { ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_L1_CTRL); ocp_data &= ~0xf; if (is_flow_control(speed)) ocp_data |= 0xf; else ocp_data |= 0x1; ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); } ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); ocp_data &= ~FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); usleep_range(1000, 2000); ocp_data |= FC_PATCH_TASK; ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); return rtl_enable(tp); } static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising) { u16 bmcr; int ret = 0; if (autoneg == AUTONEG_DISABLE) { if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) return -EINVAL; switch (speed) { case SPEED_10: bmcr = BMCR_SPEED10; if (duplex == DUPLEX_FULL) { bmcr |= BMCR_FULLDPLX; tp->ups_info.speed_duplex = FORCE_10M_FULL; } else { tp->ups_info.speed_duplex = FORCE_10M_HALF; } break; case SPEED_100: bmcr = BMCR_SPEED100; if (duplex == DUPLEX_FULL) { bmcr |= BMCR_FULLDPLX; tp->ups_info.speed_duplex = FORCE_100M_FULL; } else { tp->ups_info.speed_duplex = FORCE_100M_HALF; } break; case SPEED_1000: if (tp->mii.supports_gmii) { bmcr = BMCR_SPEED1000 | 
BMCR_FULLDPLX; tp->ups_info.speed_duplex = NWAY_1000M_FULL; break; } fallthrough; default: ret = -EINVAL; goto out; } if (duplex == DUPLEX_FULL) tp->mii.full_duplex = 1; else tp->mii.full_duplex = 0; tp->mii.force_media = 1; } else { u16 orig, new1; u32 support; support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; if (tp->mii.supports_gmii) { support |= RTL_ADVERTISED_1000_FULL; if (tp->support_2500full) support |= RTL_ADVERTISED_2500_FULL; } if (!(advertising & support)) return -EINVAL; orig = r8152_mdio_read(tp, MII_ADVERTISE); new1 = orig & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); if (advertising & RTL_ADVERTISED_10_HALF) { new1 |= ADVERTISE_10HALF; tp->ups_info.speed_duplex = NWAY_10M_HALF; } if (advertising & RTL_ADVERTISED_10_FULL) { new1 |= ADVERTISE_10FULL; tp->ups_info.speed_duplex = NWAY_10M_FULL; } if (advertising & RTL_ADVERTISED_100_HALF) { new1 |= ADVERTISE_100HALF; tp->ups_info.speed_duplex = NWAY_100M_HALF; } if (advertising & RTL_ADVERTISED_100_FULL) { new1 |= ADVERTISE_100FULL; tp->ups_info.speed_duplex = NWAY_100M_FULL; } if (orig != new1) { r8152_mdio_write(tp, MII_ADVERTISE, new1); tp->mii.advertising = new1; } if (tp->mii.supports_gmii) { orig = r8152_mdio_read(tp, MII_CTRL1000); new1 = orig & ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); if (advertising & RTL_ADVERTISED_1000_FULL) { new1 |= ADVERTISE_1000FULL; tp->ups_info.speed_duplex = NWAY_1000M_FULL; } if (orig != new1) r8152_mdio_write(tp, MII_CTRL1000, new1); } if (tp->support_2500full) { orig = ocp_reg_read(tp, OCP_10GBT_CTRL); new1 = orig & ~MDIO_AN_10GBT_CTRL_ADV2_5G; if (advertising & RTL_ADVERTISED_2500_FULL) { new1 |= MDIO_AN_10GBT_CTRL_ADV2_5G; tp->ups_info.speed_duplex = NWAY_2500M_FULL; } if (orig != new1) ocp_reg_write(tp, OCP_10GBT_CTRL, new1); } bmcr = BMCR_ANENABLE | BMCR_ANRESTART; tp->mii.force_media = 0; } if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET; r8152_mdio_write(tp, MII_BMCR, bmcr); if (bmcr & BMCR_RESET) { int i; for (i = 0; i < 50; i++) { msleep(20); if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) break; } } out: return ret; } static void rtl8152_up(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8152_aldps_en(tp, false); r8152b_exit_oob(tp); r8152_aldps_en(tp, true); } static void rtl8152_down(struct r8152 *tp) { if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } r8152_power_cut_en(tp, false); r8152_aldps_en(tp, false); r8152b_enter_oob(tp); r8152_aldps_en(tp, true); } static void rtl8153_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); ocp_data |= LANWAKE_CLR_EN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); ocp_data &= ~LANWAKE_PIN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1); ocp_data &= ~DELAY_PHY_PWR_CHG; ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data); r8153_aldps_en(tp, true); switch (tp->version) { case RTL_VER_03: case RTL_VER_04: break; case RTL_VER_05: case RTL_VER_06: default: r8153_u2p3en(tp, true); break; } r8153_u1u2en(tp, true); } static void rtl8153_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, 
&tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); ocp_data &= ~LANWAKE_CLR_EN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); r8153_aldps_en(tp, false); r8153_enter_oob(tp); r8153_aldps_en(tp, true); } static void rtl8153b_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153_aldps_en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } static void rtl8153b_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data |= PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); r8153_aldps_en(tp, false); r8153_enter_oob(tp); r8153_aldps_en(tp, true); } static void rtl8153c_change_mtu(struct r8152 *tp) { ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, 10 * 1024 / 64); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); /* Adjust the tx fifo free credit full threshold, otherwise * the fifo would be too small to send a jumbo frame packet. */ if (tp->netdev->mtu < 8000) ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 2048 / 8); else ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, 900 / 8); } static void rtl8153c_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl8152_nic_reset(tp); rtl_reset_bmu(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= RE_INIT_LL; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); wait_oob_link_list_ready(tp); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8153c_change_mtu(tp); rtl8152_nic_reset(tp); /* rx share fifo credit full threshold */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, 0x02); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 0x08); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_NORMAL); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); ocp_data |= BIT(8); ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, 
PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153_aldps_en(tp, true); r8153b_u1u2en(tp, true); } static void rtl8156_change_mtu(struct r8152 *tp) { u32 rx_max_size = mtu_to_size(tp->netdev->mtu); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rx_max_size); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); r8156_fc_parameter(tp); /* TX share fifo free credit full threshold */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, 512 / 64); ocp_write_word(tp, MCU_TYPE_PLA, PLA_TXFIFO_FULL, ALIGN(rx_max_size + sizeof(struct tx_desc), 1024) / 16); } static void rtl8156_up(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); rtl8152_nic_reset(tp); rtl_reset_bmu(tp); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data &= ~MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); rtl8156_change_mtu(tp); switch (tp->version) { case RTL_TEST_01: case RTL_VER_10: case RTL_VER_11: ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG); ocp_data |= ACT_ODMA; ocp_write_word(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data); break; default: break; } /* share FIFO settings */ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL); ocp_data &= ~RXFIFO_FULL_MASK; ocp_data |= 0x08; ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION); ocp_data &= ~(RG_PWRDN_EN | ALL_SPEED_OFF); ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, ocp_data); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, 0x00600400); if (tp->saved_wolopts != __rtl_get_wol(tp)) { netif_warn(tp, ifup, tp->netdev, "wol setting is changed\n"); __rtl_set_wol(tp, tp->saved_wolopts); } r8153_aldps_en(tp, true); r8153_u2p3en(tp, true); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); } static void rtl8156_down(struct r8152 *tp) { u32 ocp_data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data |= PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); r8153_aldps_en(tp, false); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); /* RX FIFO settings for OOB */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_FULL, 64 / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 1024 / 16); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 4096 / 16); rtl_disable(tp); rtl_reset_bmu(tp); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. 
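	 * (W1C means "write 1 to clear": writing 1 acknowledges the event
	 * bit and writing 0 leaves it unchanged.)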
Set them to all 1 to clear them. */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_WAKE_BASE, 0x00ff); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data |= NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); ocp_data |= MCU_BORW_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); rtl_rx_vlan_en(tp, true); rxdy_gated_en(tp, false); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data |= RCR_APM | RCR_AM | RCR_AB; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); r8153_aldps_en(tp, true); } static bool rtl8152_in_nway(struct r8152 *tp) { u16 nway_state; ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000); tp->ocp_base = 0x2000; ocp_write_byte(tp, MCU_TYPE_PLA, 0xb014, 0x4c); /* phy state */ nway_state = ocp_read_word(tp, MCU_TYPE_PLA, 0xb01a); /* bit 15: TXDIS_STATE, bit 14: ABD_STATE */ if (nway_state & 0xc000) return false; else return true; } static bool rtl8153_in_nway(struct r8152 *tp) { u16 phy_state = ocp_reg_read(tp, OCP_PHY_STATE) & 0xff; if (phy_state == TXDIS_STATE || phy_state == ABD_STATE) return false; else return true; } static void r8156_mdio_force_mode(struct r8152 *tp) { u16 data; /* Select force mode through 0xa5b4 bit 15 * 0: MDIO force mode * 1: MMD force mode */ data = ocp_reg_read(tp, 0xa5b4); if (data & BIT(15)) { data &= ~BIT(15); ocp_reg_write(tp, 0xa5b4, data); } } static void set_carrier(struct r8152 *tp) { struct net_device *netdev = tp->netdev; struct napi_struct *napi = &tp->napi; u16 speed; speed = rtl8152_get_speed(tp); if (speed & LINK_STATUS) { if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); netif_stop_queue(netdev); napi_disable(napi); netif_carrier_on(netdev); rtl_start_rx(tp); clear_bit(RTL8152_SET_RX_MODE, &tp->flags); _rtl8152_set_rx_mode(netdev); napi_enable(napi); netif_wake_queue(netdev); netif_info(tp, link, netdev, "carrier on\n"); } else if (netif_queue_stopped(netdev) && skb_queue_len(&tp->tx_queue) < tp->tx_qlen) { netif_wake_queue(netdev); } } else { if (netif_carrier_ok(netdev)) { netif_carrier_off(netdev); tasklet_disable(&tp->tx_tl); napi_disable(napi); tp->rtl_ops.disable(tp); napi_enable(napi); tasklet_enable(&tp->tx_tl); netif_info(tp, link, netdev, "carrier off\n"); } } } static void rtl_work_func_t(struct work_struct *work) { struct r8152 *tp = container_of(work, struct r8152, schedule.work); /* If the device is unplugged or !netif_running(), the workqueue * doesn't need to wake the device, and could return directly. 
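	 * Otherwise, a usb_autopm reference is taken up front so the device
	 * cannot runtime-suspend while the deferred flags below are serviced.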
	 */
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
	    !netif_running(tp->netdev))
		return;

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	if (!test_bit(WORK_ENABLE, &tp->flags))
		goto out1;

	if (!mutex_trylock(&tp->control)) {
		schedule_delayed_work(&tp->schedule, 0);
		goto out1;
	}

	if (test_and_clear_bit(RTL8152_LINK_CHG, &tp->flags))
		set_carrier(tp);

	if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags))
		_rtl8152_set_rx_mode(tp->netdev);

	/* don't schedule the tasklet before linking */
	if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) &&
	    netif_carrier_ok(tp->netdev))
		tasklet_schedule(&tp->tx_tl);

	if (test_and_clear_bit(RX_EPROTO, &tp->flags) &&
	    !list_empty(&tp->rx_done))
		napi_schedule(&tp->napi);

	mutex_unlock(&tp->control);

out1:
	usb_autopm_put_interface(tp->intf);
}

static void rtl_hw_phy_work_func_t(struct work_struct *work)
{
	struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	mutex_lock(&tp->control);

	if (rtl8152_request_firmware(tp) == -ENODEV && tp->rtl_fw.retry) {
		tp->rtl_fw.retry = false;
		tp->rtl_fw.fw = NULL;

		/* Delay execution in case request_firmware() is not ready yet. */
		queue_delayed_work(system_long_wq, &tp->hw_phy_work, HZ * 10);
		goto ignore_once;
	}

	tp->rtl_ops.hw_phy_cfg(tp);

	rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex,
			  tp->advertising);

ignore_once:
	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);
}

#ifdef CONFIG_PM_SLEEP
static int rtl_notifier(struct notifier_block *nb, unsigned long action,
			void *data)
{
	struct r8152 *tp = container_of(nb, struct r8152, pm_notifier);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		usb_autopm_get_interface(tp->intf);
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		usb_autopm_put_interface(tp->intf);
		break;

	case PM_POST_RESTORE:
	case PM_RESTORE_PREPARE:
	default:
		break;
	}

	return NOTIFY_DONE;
}
#endif

static int rtl8152_open(struct net_device *netdev)
{
	struct r8152 *tp = netdev_priv(netdev);
	int res = 0;

	if (work_busy(&tp->hw_phy_work.work) & WORK_BUSY_PENDING) {
		cancel_delayed_work_sync(&tp->hw_phy_work);
		rtl_hw_phy_work_func_t(&tp->hw_phy_work.work);
	}

	res = alloc_all_mem(tp);
	if (res)
		goto out;

	res = usb_autopm_get_interface(tp->intf);
	if (res < 0)
		goto out_free;

	mutex_lock(&tp->control);

	tp->rtl_ops.up(tp);

	netif_carrier_off(netdev);
	netif_start_queue(netdev);
	set_bit(WORK_ENABLE, &tp->flags);

	res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
	if (res) {
		if (res == -ENODEV)
			netif_device_detach(tp->netdev);
		netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
			   res);
		goto out_unlock;
	}
	napi_enable(&tp->napi);
	tasklet_enable(&tp->tx_tl);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

#ifdef CONFIG_PM_SLEEP
	tp->pm_notifier.notifier_call = rtl_notifier;
	register_pm_notifier(&tp->pm_notifier);
#endif
	return 0;

out_unlock:
	mutex_unlock(&tp->control);
	usb_autopm_put_interface(tp->intf);
out_free:
	free_all_mem(tp);
out:
	return res;
}

static int rtl8152_close(struct net_device *netdev)
{
	struct r8152 *tp = netdev_priv(netdev);
	int res = 0;

#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&tp->pm_notifier);
#endif
	tasklet_disable(&tp->tx_tl);
	clear_bit(WORK_ENABLE, &tp->flags);
	usb_kill_urb(tp->intr_urb);
	cancel_delayed_work_sync(&tp->schedule);
	napi_disable(&tp->napi);
	netif_stop_queue(netdev);

	res = usb_autopm_get_interface(tp->intf);
	if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
		rtl_drop_queued_tx(tp);
		rtl_stop_rx(tp);
	} else {
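		/* The device is still reachable: bring the hardware down
		 * under the control mutex, which also serializes against
		 * the workqueue and PHY-work paths above.
		 */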
mutex_lock(&tp->control); tp->rtl_ops.down(tp); mutex_unlock(&tp->control); } if (!res) usb_autopm_put_interface(tp->intf); free_all_mem(tp); return res; } static void rtl_tally_reset(struct r8152 *tp) { u32 ocp_data; ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY); ocp_data |= TALLY_RESET; ocp_write_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY, ocp_data); } static void r8152b_init(struct r8152 *tp) { u32 ocp_data; u16 data; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } r8152_aldps_en(tp, false); if (tp->version == RTL_VER_01) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); ocp_data &= ~LED_MODE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); } r8152_power_cut_en(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL); ocp_data &= ~MCU_CLK_RATIO_MASK; ocp_data |= MCU_CLK_RATIO | D3_CLK_GATED_EN; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ocp_data); ocp_data = GPHY_STS_MSK | SPEED_DOWN_MSK | SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); rtl_tally_reset(tp); /* enable rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); } static void r8153_init(struct r8152 *tp) { u32 ocp_data; u16 data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } data = r8153_phy_status(tp, 0); if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || tp->version == RTL_VER_05) ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } data = r8153_phy_status(tp, PHY_STAT_LAN_ON); r8153_u2p3en(tp, false); if (tp->version == RTL_VER_04) { ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2); ocp_data &= ~pwd_dn_scale_mask; ocp_data |= pwd_dn_scale(96); ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); } else if (tp->version == RTL_VER_05) { ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0); ocp_data &= ~ECM_ALDPS; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) ocp_data &= ~DYNAMIC_BURST; else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); } else if (tp->version == RTL_VER_06) { ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) ocp_data &= ~DYNAMIC_BURST; else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); r8153_queue_wake(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); if (rtl8152_get_speed(tp) & LINK_STATUS) ocp_data |= CUR_LINK_OK; else ocp_data &= ~CUR_LINK_OK; ocp_data |= 
POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); } ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); ocp_data |= EP4_FULL_FC; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL); ocp_data &= ~TIMER11_EN; ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); ocp_data &= ~LED_MODE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM; if (tp->version == RTL_VER_04 && tp->udev->speed < USB_SPEED_SUPER) ocp_data |= LPM_TIMER_500MS; else ocp_data |= LPM_TIMER_500US; ocp_write_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2); ocp_data &= ~SEN_VAL_MASK; ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE; ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data); ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); r8153_power_cut_en(tp, false); rtl_runtime_suspend_enable(tp, false); r8153_mac_clk_speed_down(tp, false); r8153_u1u2en(tp, true); usb_enable_lpm(tp->udev); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); ocp_data |= LANWAKE_CLR_EN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); ocp_data &= ~LANWAKE_PIN; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); if (tp->dell_tb_rx_agg_bug) ocp_data |= RX_AGG_DISABLE; ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); rtl_tally_reset(tp); switch (tp->udev->speed) { case USB_SPEED_SUPER: case USB_SPEED_SUPER_PLUS: tp->coalesce = COALESCE_SUPER; break; case USB_SPEED_HIGH: tp->coalesce = COALESCE_HIGH; break; default: tp->coalesce = COALESCE_SLOW; break; } } static void r8153b_init(struct r8152 *tp) { u32 ocp_data; u16 data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } data = r8153_phy_status(tp, 0); data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } data = r8153_phy_status(tp, PHY_STAT_LAN_ON); r8153_u2p3en(tp, false); /* MSC timer = 0xfff * 8ms = 32760 ms */ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); r8153b_power_cut_en(tp, false); r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); if (rtl8152_get_speed(tp) & LINK_STATUS) ocp_data |= CUR_LINK_OK; else ocp_data &= ~CUR_LINK_OK; ocp_data |= POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); if (tp->udev->speed >= USB_SPEED_SUPER) r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); /* MAC clock speed down */ r8153_mac_clk_speed_down(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); ocp_data &= ~PLA_MCU_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); if (tp->version == RTL_VER_09) { /* Disable Test IO for 32QFN */ if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= TEST_IO_OFF; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); } } 
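	/* Green ethernet defaults to on here; r8153b_hw_phy_cfg() applies
	 * the setting later via r8153b_green_en() based on this flag.
	 */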
set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ } static void r8153c_init(struct r8152 *tp) { u32 ocp_data; u16 data; int i; if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); /* Disable spi_en */ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); ocp_data &= ~BIT(3); ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data); ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 0xcbf0); ocp_data |= BIT(1); ocp_write_word(tp, MCU_TYPE_USB, 0xcbf0, ocp_data); for (i = 0; i < 500; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; msleep(20); if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } data = r8153_phy_status(tp, 0); data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; r8152_mdio_write(tp, MII_BMCR, data); } data = r8153_phy_status(tp, PHY_STAT_LAN_ON); r8153_u2p3en(tp, false); /* MSC timer = 0xfff * 8ms = 32760 ms */ ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff); r8153b_power_cut_en(tp, false); r8153c_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); if (rtl8152_get_speed(tp) & LINK_STATUS) ocp_data |= CUR_LINK_OK; else ocp_data &= ~CUR_LINK_OK; ocp_data |= POLL_LINK_CHG; ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); /* MAC clock speed down */ r8153_mac_clk_speed_down(tp, true); ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_2); ocp_data &= ~BIT(7); ocp_write_byte(tp, MCU_TYPE_USB, USB_MISC_2, ocp_data); set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); rtl_tally_reset(tp); tp->coalesce = 15000; /* 15 us */ } static void r8156_hw_phy_cfg(struct r8152 *tp) { u32 ocp_data; u16 data; ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); if (ocp_data & PCUT_STATUS) { ocp_data &= ~PCUT_STATUS; ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); } data = r8153_phy_status(tp, 0); switch (data) { case PHY_STAT_EXT_INIT: rtl8152_apply_firmware(tp, true); data = ocp_reg_read(tp, 0xa468); data &= ~(BIT(3) | BIT(1)); ocp_reg_write(tp, 0xa468, data); break; case PHY_STAT_LAN_ON: case PHY_STAT_PWRDN: default: rtl8152_apply_firmware(tp, false); break; } /* disable ALDPS before updating the PHY parameters */ r8153_aldps_en(tp, false); /* disable EEE before updating the PHY parameters */ rtl_eee_enable(tp, false); data = r8153_phy_status(tp, PHY_STAT_LAN_ON); WARN_ON_ONCE(data != PHY_STAT_LAN_ON); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); switch (tp->version) { case RTL_VER_10: data = ocp_reg_read(tp, 0xad40); data &= ~0x3ff; data |= BIT(7) | BIT(2); ocp_reg_write(tp, 0xad40, data); data = ocp_reg_read(tp, 0xad4e); data |= BIT(4); ocp_reg_write(tp, 0xad4e, data); data = ocp_reg_read(tp, 0xad16); data &= ~0x3ff; data |= 0x6; ocp_reg_write(tp, 0xad16, data); data = ocp_reg_read(tp, 0xad32); data &= ~0x3f; data |= 6; ocp_reg_write(tp, 0xad32, data); data = ocp_reg_read(tp, 0xac08); 
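		/* The 0xaXXX/0xbXXX addresses in this block look like
		 * vendor-specific PHY tuning registers; the masks and values
		 * are Realtek-provided magic applied with plain
		 * read-modify-write sequences.
		 */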
data &= ~(BIT(12) | BIT(8)); ocp_reg_write(tp, 0xac08, data); data = ocp_reg_read(tp, 0xac8a); data |= BIT(12) | BIT(13) | BIT(14); data &= ~BIT(15); ocp_reg_write(tp, 0xac8a, data); data = ocp_reg_read(tp, 0xad18); data |= BIT(10); ocp_reg_write(tp, 0xad18, data); data = ocp_reg_read(tp, 0xad1a); data |= 0x3ff; ocp_reg_write(tp, 0xad1a, data); data = ocp_reg_read(tp, 0xad1c); data |= 0x3ff; ocp_reg_write(tp, 0xad1c, data); data = sram_read(tp, 0x80ea); data &= ~0xff00; data |= 0xc400; sram_write(tp, 0x80ea, data); data = sram_read(tp, 0x80eb); data &= ~0x0700; data |= 0x0300; sram_write(tp, 0x80eb, data); data = sram_read(tp, 0x80f8); data &= ~0xff00; data |= 0x1c00; sram_write(tp, 0x80f8, data); data = sram_read(tp, 0x80f1); data &= ~0xff00; data |= 0x3000; sram_write(tp, 0x80f1, data); data = sram_read(tp, 0x80fe); data &= ~0xff00; data |= 0xa500; sram_write(tp, 0x80fe, data); data = sram_read(tp, 0x8102); data &= ~0xff00; data |= 0x5000; sram_write(tp, 0x8102, data); data = sram_read(tp, 0x8015); data &= ~0xff00; data |= 0x3300; sram_write(tp, 0x8015, data); data = sram_read(tp, 0x8100); data &= ~0xff00; data |= 0x7000; sram_write(tp, 0x8100, data); data = sram_read(tp, 0x8014); data &= ~0xff00; data |= 0xf000; sram_write(tp, 0x8014, data); data = sram_read(tp, 0x8016); data &= ~0xff00; data |= 0x6500; sram_write(tp, 0x8016, data); data = sram_read(tp, 0x80dc); data &= ~0xff00; data |= 0xed00; sram_write(tp, 0x80dc, data); data = sram_read(tp, 0x80df); data |= BIT(8); sram_write(tp, 0x80df, data); data = sram_read(tp, 0x80e1); data &= ~BIT(8); sram_write(tp, 0x80e1, data); data = ocp_reg_read(tp, 0xbf06); data &= ~0x003f; data |= 0x0038; ocp_reg_write(tp, 0xbf06, data); sram_write(tp, 0x819f, 0xddb6); ocp_reg_write(tp, 0xbc34, 0x5555); data = ocp_reg_read(tp, 0xbf0a); data &= ~0x0e00; data |= 0x0a00; ocp_reg_write(tp, 0xbf0a, data); data = ocp_reg_read(tp, 0xbd2c); data &= ~BIT(13); ocp_reg_write(tp, 0xbd2c, data); break; case RTL_VER_11: data = ocp_reg_read(tp, 0xad16); data |= 0x3ff; ocp_reg_write(tp, 0xad16, data); data = ocp_reg_read(tp, 0xad32); data &= ~0x3f; data |= 6; ocp_reg_write(tp, 0xad32, data); data = ocp_reg_read(tp, 0xac08); data &= ~(BIT(12) | BIT(8)); ocp_reg_write(tp, 0xac08, data); data = ocp_reg_read(tp, 0xacc0); data &= ~0x3; data |= BIT(1); ocp_reg_write(tp, 0xacc0, data); data = ocp_reg_read(tp, 0xad40); data &= ~0xe7; data |= BIT(6) | BIT(2); ocp_reg_write(tp, 0xad40, data); data = ocp_reg_read(tp, 0xac14); data &= ~BIT(7); ocp_reg_write(tp, 0xac14, data); data = ocp_reg_read(tp, 0xac80); data &= ~(BIT(8) | BIT(9)); ocp_reg_write(tp, 0xac80, data); data = ocp_reg_read(tp, 0xac5e); data &= ~0x7; data |= BIT(1); ocp_reg_write(tp, 0xac5e, data); ocp_reg_write(tp, 0xad4c, 0x00a8); ocp_reg_write(tp, 0xac5c, 0x01ff); data = ocp_reg_read(tp, 0xac8a); data &= ~0xf0; data |= BIT(4) | BIT(5); ocp_reg_write(tp, 0xac8a, data); ocp_reg_write(tp, 0xb87c, 0x8157); data = ocp_reg_read(tp, 0xb87e); data &= ~0xff00; data |= 0x0500; ocp_reg_write(tp, 0xb87e, data); ocp_reg_write(tp, 0xb87c, 0x8159); data = ocp_reg_read(tp, 0xb87e); data &= ~0xff00; data |= 0x0700; ocp_reg_write(tp, 0xb87e, data); /* AAGC */ ocp_reg_write(tp, 0xb87c, 0x80a2); ocp_reg_write(tp, 0xb87e, 0x0153); ocp_reg_write(tp, 0xb87c, 0x809c); ocp_reg_write(tp, 0xb87e, 0x0153); /* EEE parameter */ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_TXTWSYS_2P5G, 0x0056); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_USB_CFG); ocp_data |= EN_XG_LIP | EN_G_LIP; ocp_write_word(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data); 
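		/* The PLA_EEE_TXTWSYS_2P5G write above presumably programs
		 * the EEE Tw_sys (transmit wake) time used at 2.5G; that
		 * reading of the register name is an assumption, not taken
		 * from documentation.
		 */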
sram_write(tp, 0x8257, 0x020f); /* XG PLL */ sram_write(tp, 0x80ea, 0x7843); /* GIGA Master */ if (rtl_phy_patch_request(tp, true, true)) return; /* Advance EEE */ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4); ocp_data |= EEE_SPDWN_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data); data = ocp_reg_read(tp, OCP_DOWN_SPEED); data &= ~(EN_EEE_100 | EN_EEE_1000); data |= EN_10M_CLKDIV; ocp_reg_write(tp, OCP_DOWN_SPEED, data); tp->ups_info._10m_ckdiv = true; tp->ups_info.eee_plloff_100 = false; tp->ups_info.eee_plloff_giga = false; data = ocp_reg_read(tp, OCP_POWER_CFG); data &= ~EEE_CLKDIV_EN; ocp_reg_write(tp, OCP_POWER_CFG, data); tp->ups_info.eee_ckdiv = false; ocp_reg_write(tp, OCP_SYSCLK_CFG, 0); ocp_reg_write(tp, OCP_SYSCLK_CFG, sysclk_div_expo(5)); tp->ups_info._250m_ckdiv = false; rtl_phy_patch_request(tp, false, true); /* enable ADC Ibias Cal */ data = ocp_reg_read(tp, 0xd068); data |= BIT(13); ocp_reg_write(tp, 0xd068, data); /* enable Thermal Sensor */ data = sram_read(tp, 0x81a2); data &= ~BIT(8); sram_write(tp, 0x81a2, data); data = ocp_reg_read(tp, 0xb54c); data &= ~0xff00; data |= 0xdb00; ocp_reg_write(tp, 0xb54c, data); /* Nway 2.5G Lite */ data = ocp_reg_read(tp, 0xa454); data &= ~BIT(0); ocp_reg_write(tp, 0xa454, data); /* CS DSP solution */ data = ocp_reg_read(tp, OCP_10GBT_CTRL); data |= RTL_ADV2_5G_F_R; ocp_reg_write(tp, OCP_10GBT_CTRL, data); data = ocp_reg_read(tp, 0xad4e); data &= ~BIT(4); ocp_reg_write(tp, 0xad4e, data); data = ocp_reg_read(tp, 0xa86a); data &= ~BIT(0); ocp_reg_write(tp, 0xa86a, data); /* MDI SWAP */ if ((ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CFG) & MID_REVERSE) && (ocp_reg_read(tp, 0xd068) & BIT(1))) { u16 swap_a, swap_b; data = ocp_reg_read(tp, 0xd068); data &= ~0x1f; data |= 0x1; /* p0 */ ocp_reg_write(tp, 0xd068, data); swap_a = ocp_reg_read(tp, 0xd06a); data &= ~0x18; data |= 0x18; /* p3 */ ocp_reg_write(tp, 0xd068, data); swap_b = ocp_reg_read(tp, 0xd06a); data &= ~0x18; /* p0 */ ocp_reg_write(tp, 0xd068, data); ocp_reg_write(tp, 0xd06a, (swap_a & ~0x7ff) | (swap_b & 0x7ff)); data |= 0x18; /* p3 */ ocp_reg_write(tp, 0xd068, data); ocp_reg_write(tp, 0xd06a, (swap_b & ~0x7ff) | (swap_a & 0x7ff)); data &= ~0x18; data |= 0x08; /* p1 */ ocp_reg_write(tp, 0xd068, data); swap_a = ocp_reg_read(tp, 0xd06a); data &= ~0x18; data |= 0x10; /* p2 */ ocp_reg_write(tp, 0xd068, data); swap_b = ocp_reg_read(tp, 0xd06a); data &= ~0x18; data |= 0x08; /* p1 */ ocp_reg_write(tp, 0xd068, data); ocp_reg_write(tp, 0xd06a, (swap_a & ~0x7ff) | (swap_b & 0x7ff)); data &= ~0x18; data |= 0x10; /* p2 */ ocp_reg_write(tp, 0xd068, data); ocp_reg_write(tp, 0xd06a, (swap_b & ~0x7ff) | (swap_a & 0x7ff)); swap_a = ocp_reg_read(tp, 0xbd5a); swap_b = ocp_reg_read(tp, 0xbd5c); ocp_reg_write(tp, 0xbd5a, (swap_a & ~0x1f1f) | ((swap_b & 0x1f) << 8) | ((swap_b >> 8) & 0x1f)); ocp_reg_write(tp, 0xbd5c, (swap_b & ~0x1f1f) | ((swap_a & 0x1f) << 8) | ((swap_a >> 8) & 0x1f)); swap_a = ocp_reg_read(tp, 0xbc18); swap_b = ocp_reg_read(tp, 0xbc1a); ocp_reg_write(tp, 0xbc18, (swap_a & ~0x1f1f) | ((swap_b & 0x1f) << 8) | ((swap_b >> 8) & 0x1f)); ocp_reg_write(tp, 0xbc1a, (swap_b & ~0x1f1f) | ((swap_a & 0x1f) << 8) | ((swap_a >> 8) & 0x1f)); } /* Notify the MAC when the speed is changed to force mode. 
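		 * This is done by enabling the INTR_SPEED_FORCE source in
		 * OCP_INTR_EN just below.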
		/* Notify the MAC when the speed is changed to force mode. */
		data = ocp_reg_read(tp, OCP_INTR_EN);
		data |= INTR_SPEED_FORCE;
		ocp_reg_write(tp, OCP_INTR_EN, data);
		break;
	default:
		break;
	}

	rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));

	data = ocp_reg_read(tp, 0xa428);
	data &= ~BIT(9);
	ocp_reg_write(tp, 0xa428, data);
	data = ocp_reg_read(tp, 0xa5ea);
	data &= ~BIT(0);
	ocp_reg_write(tp, 0xa5ea, data);
	tp->ups_info.lite_mode = 0;

	if (tp->eee_en)
		rtl_eee_enable(tp, true);

	r8153_aldps_en(tp, true);
	r8152b_enable_fc(tp);
	r8153_u2p3en(tp, true);

	set_bit(PHY_RESET, &tp->flags);
}

static void r8156b_hw_phy_cfg(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;

	switch (tp->version) {
	case RTL_VER_12:
		ocp_reg_write(tp, 0xbf86, 0x9000);
		data = ocp_reg_read(tp, 0xc402);
		data |= BIT(10);
		ocp_reg_write(tp, 0xc402, data);
		data &= ~BIT(10);
		ocp_reg_write(tp, 0xc402, data);
		ocp_reg_write(tp, 0xbd86, 0x1010);
		ocp_reg_write(tp, 0xbd88, 0x1010);
		data = ocp_reg_read(tp, 0xbd4e);
		data &= ~(BIT(10) | BIT(11));
		data |= BIT(11);
		ocp_reg_write(tp, 0xbd4e, data);
		data = ocp_reg_read(tp, 0xbf46);
		data &= ~0xf00;
		data |= 0x700;
		ocp_reg_write(tp, 0xbf46, data);
		break;
	case RTL_VER_13:
	case RTL_VER_15:
		r8156b_wait_loading_flash(tp);
		break;
	default:
		break;
	}

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
	if (ocp_data & PCUT_STATUS) {
		ocp_data &= ~PCUT_STATUS;
		ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
	}

	data = r8153_phy_status(tp, 0);
	switch (data) {
	case PHY_STAT_EXT_INIT:
		rtl8152_apply_firmware(tp, true);

		data = ocp_reg_read(tp, 0xa466);
		data &= ~BIT(0);
		ocp_reg_write(tp, 0xa466, data);

		data = ocp_reg_read(tp, 0xa468);
		data &= ~(BIT(3) | BIT(1));
		ocp_reg_write(tp, 0xa468, data);
		break;
	case PHY_STAT_LAN_ON:
	case PHY_STAT_PWRDN:
	default:
		rtl8152_apply_firmware(tp, false);
		break;
	}

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	/* disable ALDPS before updating the PHY parameters */
	r8153_aldps_en(tp, false);

	/* disable EEE before updating the PHY parameters */
	rtl_eee_enable(tp, false);

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
	WARN_ON_ONCE(data != PHY_STAT_LAN_ON);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
	ocp_data |= PFM_PWM_SWITCH;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);

	switch (tp->version) {
	case RTL_VER_12:
		data = ocp_reg_read(tp, 0xbc08);
		data |= BIT(3) | BIT(2);
		ocp_reg_write(tp, 0xbc08, data);

		data = sram_read(tp, 0x8fff);
		data &= ~0xff00;
		data |= 0x0400;
		sram_write(tp, 0x8fff, data);

		data = ocp_reg_read(tp, 0xacda);
		data |= 0xff00;
		ocp_reg_write(tp, 0xacda, data);
		data = ocp_reg_read(tp, 0xacde);
		data |= 0xf000;
		ocp_reg_write(tp, 0xacde, data);
		ocp_reg_write(tp, 0xac8c, 0x0ffc);
		ocp_reg_write(tp, 0xac46, 0xb7b4);
		ocp_reg_write(tp, 0xac50, 0x0fbc);
		ocp_reg_write(tp, 0xac3c, 0x9240);
		ocp_reg_write(tp, 0xac4e, 0x0db4);
		ocp_reg_write(tp, 0xacc6, 0x0707);
		ocp_reg_write(tp, 0xacc8, 0xa0d3);
		ocp_reg_write(tp, 0xad08, 0x0007);

		ocp_reg_write(tp, 0xb87c, 0x8560);
		ocp_reg_write(tp, 0xb87e, 0x19cc);
		ocp_reg_write(tp, 0xb87c, 0x8562);
		ocp_reg_write(tp, 0xb87e, 0x19cc);
		ocp_reg_write(tp, 0xb87c, 0x8564);
		ocp_reg_write(tp, 0xb87e, 0x19cc);
		ocp_reg_write(tp, 0xb87c, 0x8566);
		ocp_reg_write(tp, 0xb87e, 0x147d);
		ocp_reg_write(tp, 0xb87c, 0x8568);
		ocp_reg_write(tp, 0xb87e, 0x147d);
		ocp_reg_write(tp, 0xb87c, 0x856a);
		ocp_reg_write(tp, 0xb87e, 0x147d);
		ocp_reg_write(tp, 0xb87c, 0x8ffe);
		ocp_reg_write(tp, 0xb87e, 0x0907);
		ocp_reg_write(tp, 0xb87c, 0x80d6);
		ocp_reg_write(tp, 0xb87e, 0x2801);
		ocp_reg_write(tp, 0xb87c, 0x80f2);
		ocp_reg_write(tp, 0xb87e, 0x2801);
		ocp_reg_write(tp, 0xb87c, 0x80f4);
		ocp_reg_write(tp, 0xb87e, 0x6077);
		ocp_reg_write(tp, 0xb506, 0x01e7);

		ocp_reg_write(tp, 0xb87c, 0x8013);
		ocp_reg_write(tp, 0xb87e, 0x0700);
		ocp_reg_write(tp, 0xb87c, 0x8fb9);
		ocp_reg_write(tp, 0xb87e, 0x2801);
		ocp_reg_write(tp, 0xb87c, 0x8fba);
		ocp_reg_write(tp, 0xb87e, 0x0100);
		ocp_reg_write(tp, 0xb87c, 0x8fbc);
		ocp_reg_write(tp, 0xb87e, 0x1900);
		ocp_reg_write(tp, 0xb87c, 0x8fbe);
		ocp_reg_write(tp, 0xb87e, 0xe100);
		ocp_reg_write(tp, 0xb87c, 0x8fc0);
		ocp_reg_write(tp, 0xb87e, 0x0800);
		ocp_reg_write(tp, 0xb87c, 0x8fc2);
		ocp_reg_write(tp, 0xb87e, 0xe500);
		ocp_reg_write(tp, 0xb87c, 0x8fc4);
		ocp_reg_write(tp, 0xb87e, 0x0f00);
		ocp_reg_write(tp, 0xb87c, 0x8fc6);
		ocp_reg_write(tp, 0xb87e, 0xf100);
		ocp_reg_write(tp, 0xb87c, 0x8fc8);
		ocp_reg_write(tp, 0xb87e, 0x0400);
		ocp_reg_write(tp, 0xb87c, 0x8fca);
		ocp_reg_write(tp, 0xb87e, 0xf300);
		ocp_reg_write(tp, 0xb87c, 0x8fcc);
		ocp_reg_write(tp, 0xb87e, 0xfd00);
		ocp_reg_write(tp, 0xb87c, 0x8fce);
		ocp_reg_write(tp, 0xb87e, 0xff00);
		ocp_reg_write(tp, 0xb87c, 0x8fd0);
		ocp_reg_write(tp, 0xb87e, 0xfb00);
		ocp_reg_write(tp, 0xb87c, 0x8fd2);
		ocp_reg_write(tp, 0xb87e, 0x0100);
		ocp_reg_write(tp, 0xb87c, 0x8fd4);
		ocp_reg_write(tp, 0xb87e, 0xf400);
		ocp_reg_write(tp, 0xb87c, 0x8fd6);
		ocp_reg_write(tp, 0xb87e, 0xff00);
		ocp_reg_write(tp, 0xb87c, 0x8fd8);
		ocp_reg_write(tp, 0xb87e, 0xf600);

		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG);
		ocp_data |= EN_XG_LIP | EN_G_LIP;
		ocp_write_byte(tp, MCU_TYPE_PLA, PLA_USB_CFG, ocp_data);

		ocp_reg_write(tp, 0xb87c, 0x813d);
		ocp_reg_write(tp, 0xb87e, 0x390e);
		ocp_reg_write(tp, 0xb87c, 0x814f);
		ocp_reg_write(tp, 0xb87e, 0x790e);
		ocp_reg_write(tp, 0xb87c, 0x80b0);
		ocp_reg_write(tp, 0xb87e, 0x0f31);
		data = ocp_reg_read(tp, 0xbf4c);
		data |= BIT(1);
		ocp_reg_write(tp, 0xbf4c, data);
		data = ocp_reg_read(tp, 0xbcca);
		data |= BIT(9) | BIT(8);
		ocp_reg_write(tp, 0xbcca, data);
		ocp_reg_write(tp, 0xb87c, 0x8141);
		ocp_reg_write(tp, 0xb87e, 0x320e);
		ocp_reg_write(tp, 0xb87c, 0x8153);
		ocp_reg_write(tp, 0xb87e, 0x720e);
		ocp_reg_write(tp, 0xb87c, 0x8529);
		ocp_reg_write(tp, 0xb87e, 0x050e);

		data = ocp_reg_read(tp, OCP_EEE_CFG);
		data &= ~CTAP_SHORT_EN;
		ocp_reg_write(tp, OCP_EEE_CFG, data);

		sram_write(tp, 0x816c, 0xc4a0);
		sram_write(tp, 0x8170, 0xc4a0);
		sram_write(tp, 0x8174, 0x04a0);
		sram_write(tp, 0x8178, 0x04a0);
		sram_write(tp, 0x817c, 0x0719);
		sram_write(tp, 0x8ff4, 0x0400);
		sram_write(tp, 0x8ff1, 0x0404);

		ocp_reg_write(tp, 0xbf4a, 0x001b);
		ocp_reg_write(tp, 0xb87c, 0x8033);
		ocp_reg_write(tp, 0xb87e, 0x7c13);
		ocp_reg_write(tp, 0xb87c, 0x8037);
		ocp_reg_write(tp, 0xb87e, 0x7c13);
		ocp_reg_write(tp, 0xb87c, 0x803b);
		ocp_reg_write(tp, 0xb87e, 0xfc32);
		ocp_reg_write(tp, 0xb87c, 0x803f);
		ocp_reg_write(tp, 0xb87e, 0x7c13);
		ocp_reg_write(tp, 0xb87c, 0x8043);
		ocp_reg_write(tp, 0xb87e, 0x7c13);
		ocp_reg_write(tp, 0xb87c, 0x8047);
		ocp_reg_write(tp, 0xb87e, 0x7c13);

		ocp_reg_write(tp, 0xb87c, 0x8145);
		ocp_reg_write(tp, 0xb87e, 0x370e);
		ocp_reg_write(tp, 0xb87c, 0x8157);
		ocp_reg_write(tp, 0xb87e, 0x770e);
		ocp_reg_write(tp, 0xb87c, 0x8169);
		ocp_reg_write(tp, 0xb87e, 0x0d0a);
		ocp_reg_write(tp, 0xb87c, 0x817b);
		ocp_reg_write(tp, 0xb87e, 0x1d0a);

		data = sram_read(tp, 0x8217);
		data &= ~0xff00;
		data |= 0x5000;
		sram_write(tp, 0x8217, data);
		data = sram_read(tp, 0x821a);
		data &= ~0xff00;
		data |= 0x5000;
		sram_write(tp, 0x821a, data);
		sram_write(tp, 0x80da, 0x0403);
		data = sram_read(tp, 0x80dc);
		data &= ~0xff00;
		data |= 0x1000;
		sram_write(tp, 0x80dc, data);
		sram_write(tp, 0x80b3, 0x0384);
		sram_write(tp, 0x80b7, 0x2007);
		data = sram_read(tp, 0x80ba);
		data &= ~0xff00;
		data |= 0x6c00;
		sram_write(tp, 0x80ba, data);
		sram_write(tp, 0x80b5, 0xf009);
		data = sram_read(tp, 0x80bd);
		data &= ~0xff00;
		data |= 0x9f00;
		sram_write(tp, 0x80bd, data);
		sram_write(tp, 0x80c7, 0xf083);
		sram_write(tp, 0x80dd, 0x03f0);
		data = sram_read(tp, 0x80df);
		data &= ~0xff00;
		data |= 0x1000;
		sram_write(tp, 0x80df, data);
		sram_write(tp, 0x80cb, 0x2007);
		data = sram_read(tp, 0x80ce);
		data &= ~0xff00;
		data |= 0x6c00;
		sram_write(tp, 0x80ce, data);
		sram_write(tp, 0x80c9, 0x8009);
		data = sram_read(tp, 0x80d1);
		data &= ~0xff00;
		data |= 0x8000;
		sram_write(tp, 0x80d1, data);
		sram_write(tp, 0x80a3, 0x200a);
		sram_write(tp, 0x80a5, 0xf0ad);
		sram_write(tp, 0x809f, 0x6073);
		sram_write(tp, 0x80a1, 0x000b);
		data = sram_read(tp, 0x80a9);
		data &= ~0xff00;
		data |= 0xc000;
		sram_write(tp, 0x80a9, data);

		if (rtl_phy_patch_request(tp, true, true))
			return;

		data = ocp_reg_read(tp, 0xb896);
		data &= ~BIT(0);
		ocp_reg_write(tp, 0xb896, data);
		data = ocp_reg_read(tp, 0xb892);
		data &= ~0xff00;
		ocp_reg_write(tp, 0xb892, data);
		ocp_reg_write(tp, 0xb88e, 0xc23e);
		ocp_reg_write(tp, 0xb890, 0x0000);
		ocp_reg_write(tp, 0xb88e, 0xc240);
		ocp_reg_write(tp, 0xb890, 0x0103);
		ocp_reg_write(tp, 0xb88e, 0xc242);
		ocp_reg_write(tp, 0xb890, 0x0507);
		ocp_reg_write(tp, 0xb88e, 0xc244);
		ocp_reg_write(tp, 0xb890, 0x090b);
		ocp_reg_write(tp, 0xb88e, 0xc246);
		ocp_reg_write(tp, 0xb890, 0x0c0e);
		ocp_reg_write(tp, 0xb88e, 0xc248);
		ocp_reg_write(tp, 0xb890, 0x1012);
		ocp_reg_write(tp, 0xb88e, 0xc24a);
		ocp_reg_write(tp, 0xb890, 0x1416);
		data = ocp_reg_read(tp, 0xb896);
		data |= BIT(0);
		ocp_reg_write(tp, 0xb896, data);

		rtl_phy_patch_request(tp, false, true);

		data = ocp_reg_read(tp, 0xa86a);
		data |= BIT(0);
		ocp_reg_write(tp, 0xa86a, data);
		data = ocp_reg_read(tp, 0xa6f0);
		data |= BIT(0);
		ocp_reg_write(tp, 0xa6f0, data);

		ocp_reg_write(tp, 0xbfa0, 0xd70d);
		ocp_reg_write(tp, 0xbfa2, 0x4100);
		ocp_reg_write(tp, 0xbfa4, 0xe868);
		ocp_reg_write(tp, 0xbfa6, 0xdc59);
		ocp_reg_write(tp, 0xb54c, 0x3c18);
		data = ocp_reg_read(tp, 0xbfa4);
		data &= ~BIT(5);
		ocp_reg_write(tp, 0xbfa4, data);
		data = sram_read(tp, 0x817d);
		data |= BIT(12);
		sram_write(tp, 0x817d, data);
		break;
	case RTL_VER_13:
		/* 2.5G INRX */
		data = ocp_reg_read(tp, 0xac46);
		data &= ~0x00f0;
		data |= 0x0090;
		ocp_reg_write(tp, 0xac46, data);
		data = ocp_reg_read(tp, 0xad30);
		data &= ~0x0003;
		data |= 0x0001;
		ocp_reg_write(tp, 0xad30, data);
		fallthrough;
	case RTL_VER_15:
		/* EEE parameter */
		ocp_reg_write(tp, 0xb87c, 0x80f5);
		ocp_reg_write(tp, 0xb87e, 0x760e);
		ocp_reg_write(tp, 0xb87c, 0x8107);
		ocp_reg_write(tp, 0xb87e, 0x360e);
		ocp_reg_write(tp, 0xb87c, 0x8551);
		data = ocp_reg_read(tp, 0xb87e);
		data &= ~0xff00;
		data |= 0x0800;
		ocp_reg_write(tp, 0xb87e, data);

		/* ADC_PGA parameter */
		data = ocp_reg_read(tp, 0xbf00);
		data &= ~0xe000;
		data |= 0xa000;
		ocp_reg_write(tp, 0xbf00, data);
		data = ocp_reg_read(tp, 0xbf46);
		data &= ~0x0f00;
		data |= 0x0300;
		ocp_reg_write(tp, 0xbf46, data);

		/* Green Table-PGA, 1G full viterbi */
		sram_write(tp, 0x8044, 0x2417);
		sram_write(tp, 0x804a, 0x2417);
		sram_write(tp, 0x8050, 0x2417);
		sram_write(tp, 0x8056, 0x2417);
		sram_write(tp, 0x805c, 0x2417);
		sram_write(tp, 0x8062, 0x2417);
		sram_write(tp, 0x8068, 0x2417);
		sram_write(tp, 0x806e, 0x2417);
		sram_write(tp, 0x8074, 0x2417);
		sram_write(tp, 0x807a, 0x2417);

		/* XG PLL */
		data = ocp_reg_read(tp, 0xbf84);
		data &= ~0xe000;
		data |= 0xa000;
		ocp_reg_write(tp, 0xbf84, data);
		break;
	default:
		break;
	}
	/* Notify the MAC when the speed is changed to force mode. */
	data = ocp_reg_read(tp, OCP_INTR_EN);
	data |= INTR_SPEED_FORCE;
	ocp_reg_write(tp, OCP_INTR_EN, data);

	if (rtl_phy_patch_request(tp, true, true))
		return;

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4);
	ocp_data |= EEE_SPDWN_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, ocp_data);

	data = ocp_reg_read(tp, OCP_DOWN_SPEED);
	data &= ~(EN_EEE_100 | EN_EEE_1000);
	data |= EN_10M_CLKDIV;
	ocp_reg_write(tp, OCP_DOWN_SPEED, data);
	tp->ups_info._10m_ckdiv = true;
	tp->ups_info.eee_plloff_100 = false;
	tp->ups_info.eee_plloff_giga = false;

	data = ocp_reg_read(tp, OCP_POWER_CFG);
	data &= ~EEE_CLKDIV_EN;
	ocp_reg_write(tp, OCP_POWER_CFG, data);
	tp->ups_info.eee_ckdiv = false;

	rtl_phy_patch_request(tp, false, true);

	rtl_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));

	data = ocp_reg_read(tp, 0xa428);
	data &= ~BIT(9);
	ocp_reg_write(tp, 0xa428, data);
	data = ocp_reg_read(tp, 0xa5ea);
	data &= ~BIT(0);
	ocp_reg_write(tp, 0xa5ea, data);
	tp->ups_info.lite_mode = 0;

	if (tp->eee_en)
		rtl_eee_enable(tp, true);

	r8153_aldps_en(tp, true);
	r8152b_enable_fc(tp);
	r8153_u2p3en(tp, true);

	set_bit(PHY_RESET, &tp->flags);
}

static void r8156_init(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;
	int i;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
	ocp_data &= ~EN_ALL_SPEED;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data);

	ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION);
	ocp_data |= BYPASS_MAC_RESET;
	ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data);

	r8153b_u1u2en(tp, false);

	for (i = 0; i < 500; i++) {
		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
		    AUTOLOAD_DONE)
			break;

		msleep(20);
		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
			return;
	}

	data = r8153_phy_status(tp, 0);
	if (data == PHY_STAT_EXT_INIT) {
		data = ocp_reg_read(tp, 0xa468);
		data &= ~(BIT(3) | BIT(1));
		ocp_reg_write(tp, 0xa468, data);
	}

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
	WARN_ON_ONCE(data != PHY_STAT_LAN_ON);

	r8153_u2p3en(tp, false);

	/* MSC timer = 0xfff * 8ms = 32760 ms */
	ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);
	/* U1/U2/L1 idle timer. 500 us */
	ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);

	r8153b_power_cut_en(tp, false);
	r8156_ups_en(tp, false);
	r8153_queue_wake(tp, false);
	rtl_runtime_suspend_enable(tp, false);

	if (tp->udev->speed >= USB_SPEED_SUPER)
		r8153b_u1u2en(tp, true);

	usb_enable_lpm(tp->udev);

	r8156_mac_clk_spd(tp, true);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
	ocp_data &= ~PLA_MCU_SPDWN_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
	if (rtl8152_get_speed(tp) & LINK_STATUS)
		ocp_data |= CUR_LINK_OK;
	else
		ocp_data &= ~CUR_LINK_OK;
	ocp_data |= POLL_LINK_CHG;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);

	set_bit(GREEN_ETHERNET, &tp->flags);

	/* rx aggregation */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG);
	ocp_data |= ACT_ODMA;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);

	r8156_mdio_force_mode(tp);
	rtl_tally_reset(tp);

	tp->coalesce = 15000;	/* 15 us */
}

static void r8156b_init(struct r8152 *tp)
{
	u32 ocp_data;
	u16 data;
	int i;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
	ocp_data &= ~EN_ALL_SPEED;
	ocp_write_byte(tp, MCU_TYPE_USB, USB_ECM_OP, ocp_data);

	ocp_write_word(tp, MCU_TYPE_USB, USB_SPEED_OPTION, 0);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_ECM_OPTION);
	ocp_data |= BYPASS_MAC_RESET;
	ocp_write_word(tp, MCU_TYPE_USB, USB_ECM_OPTION, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
	ocp_data |= RX_DETECT8;
	ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);

	r8153b_u1u2en(tp, false);

	switch (tp->version) {
	case RTL_VER_13:
	case RTL_VER_15:
		r8156b_wait_loading_flash(tp);
		break;
	default:
		break;
	}

	for (i = 0; i < 500; i++) {
		if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
		    AUTOLOAD_DONE)
			break;

		msleep(20);
		if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
			return;
	}

	data = r8153_phy_status(tp, 0);
	if (data == PHY_STAT_EXT_INIT) {
		data = ocp_reg_read(tp, 0xa468);
		data &= ~(BIT(3) | BIT(1));
		ocp_reg_write(tp, 0xa468, data);

		data = ocp_reg_read(tp, 0xa466);
		data &= ~BIT(0);
		ocp_reg_write(tp, 0xa466, data);
	}

	data = r8152_mdio_read(tp, MII_BMCR);
	if (data & BMCR_PDOWN) {
		data &= ~BMCR_PDOWN;
		r8152_mdio_write(tp, MII_BMCR, data);
	}

	data = r8153_phy_status(tp, PHY_STAT_LAN_ON);

	r8153_u2p3en(tp, false);

	/* MSC timer = 0xfff * 8ms = 32760 ms */
	ocp_write_word(tp, MCU_TYPE_USB, USB_MSC_TIMER, 0x0fff);

	/* U1/U2/L1 idle timer. 500 us */
	ocp_write_word(tp, MCU_TYPE_USB, USB_U1U2_TIMER, 500);

	r8153b_power_cut_en(tp, false);
	r8156_ups_en(tp, false);
	r8153_queue_wake(tp, false);
	rtl_runtime_suspend_enable(tp, false);

	if (tp->udev->speed >= USB_SPEED_SUPER)
		r8153b_u1u2en(tp, true);

	usb_enable_lpm(tp->udev);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RCR);
	ocp_data &= ~SLOT_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
	ocp_data |= FLOW_CTRL_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
	/* enable fc timer and set timer to 600 ms. */
	ocp_write_word(tp, MCU_TYPE_USB, USB_FC_TIMER,
		       CTRL_TIMER_EN | (600 / 8));

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_CTRL);
	if (!(ocp_read_word(tp, MCU_TYPE_PLA, PLA_POL_GPIO_CTRL) & DACK_DET_EN))
		ocp_data |= FLOW_CTRL_PATCH_2;
	ocp_data &= ~AUTO_SPEEDUP;
	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_CTRL, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
	ocp_data |= FC_PATCH_TASK;
	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);

	r8156_mac_clk_spd(tp, true);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
	ocp_data &= ~PLA_MCU_SPDWN_EN;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);

	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
	if (rtl8152_get_speed(tp) & LINK_STATUS)
		ocp_data |= CUR_LINK_OK;
	else
		ocp_data &= ~CUR_LINK_OK;
	ocp_data |= POLL_LINK_CHG;
	ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);

	set_bit(GREEN_ETHERNET, &tp->flags);

	/* rx aggregation */
	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);

	r8156_mdio_force_mode(tp);
	rtl_tally_reset(tp);

	tp->coalesce = 15000;	/* 15 us */
}

static bool rtl_check_vendor_ok(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *in, *out, *intr;

	if (usb_find_common_endpoints(alt, &in, &out, &intr, NULL) < 0) {
		dev_err(&intf->dev, "Expected endpoints are not found\n");
		return false;
	}

	/* Check Rx endpoint address */
	if (usb_endpoint_num(in) != 1) {
		dev_err(&intf->dev, "Invalid Rx endpoint address\n");
		return false;
	}

	/* Check Tx endpoint address */
	if (usb_endpoint_num(out) != 2) {
		dev_err(&intf->dev, "Invalid Tx endpoint address\n");
		return false;
	}

	/* Check interrupt endpoint address */
	if (usb_endpoint_num(intr) != 3) {
		dev_err(&intf->dev, "Invalid interrupt endpoint address\n");
		return false;
	}

	return true;
}

static int rtl8152_pre_reset(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);
	struct net_device *netdev;

	rtnl_lock();

	if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
		return 0;

	netdev = tp->netdev;
	if (!netif_running(netdev))
		return 0;

	netif_stop_queue(netdev);
	tasklet_disable(&tp->tx_tl);
	clear_bit(WORK_ENABLE, &tp->flags);
	usb_kill_urb(tp->intr_urb);
	cancel_delayed_work_sync(&tp->schedule);
	napi_disable(&tp->napi);
	if (netif_carrier_ok(netdev)) {
		mutex_lock(&tp->control);
		set_bit(IN_PRE_RESET, &tp->flags);
		tp->rtl_ops.disable(tp);
		clear_bit(IN_PRE_RESET, &tp->flags);
		mutex_unlock(&tp->control);
	}

	return 0;
}

static int rtl8152_post_reset(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);
	struct net_device *netdev;
	struct sockaddr_storage ss;

	if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
		goto exit;

	rtl_set_accessible(tp);

	/* reset the MAC address in case of policy change */
	if (determine_ethernet_addr(tp, &ss) >= 0)
		dev_set_mac_address(tp->netdev, &ss, NULL);

	netdev = tp->netdev;
	if (!netif_running(netdev))
		goto exit;

	set_bit(WORK_ENABLE, &tp->flags);
	if (netif_carrier_ok(netdev)) {
		mutex_lock(&tp->control);
		tp->rtl_ops.enable(tp);
		rtl_start_rx(tp);
		_rtl8152_set_rx_mode(netdev);
		mutex_unlock(&tp->control);
	}

	napi_enable(&tp->napi);
	tasklet_enable(&tp->tx_tl);
	netif_wake_queue(netdev);
	usb_submit_urb(tp->intr_urb, GFP_KERNEL);

	if (!list_empty(&tp->rx_done))
		napi_schedule(&tp->napi);

exit:
	rtnl_unlock();
	return 0;
}
static bool delay_autosuspend(struct r8152 *tp)
{
	bool sw_linking = !!netif_carrier_ok(tp->netdev);
	bool hw_linking = !!(rtl8152_get_speed(tp) & LINK_STATUS);

	/* This means a link change has occurred that the driver hasn't
	 * detected yet. If the driver has disabled tx/rx while the hw link
	 * is up, the device wouldn't be able to wake up by receiving any
	 * packet.
	 */
	if (work_busy(&tp->schedule.work) || sw_linking != hw_linking)
		return true;

	/* If the link went down because of nway, the device may miss the
	 * link change event and wouldn't wake up when the link comes back.
	 */
	if (!sw_linking && tp->rtl_ops.in_nway(tp))
		return true;
	else if (!skb_queue_empty(&tp->tx_queue))
		return true;
	else
		return false;
}

static int rtl8152_runtime_resume(struct r8152 *tp)
{
	struct net_device *netdev = tp->netdev;

	if (netif_running(netdev) && netdev->flags & IFF_UP) {
		struct napi_struct *napi = &tp->napi;

		tp->rtl_ops.autosuspend_en(tp, false);
		napi_disable(napi);
		set_bit(WORK_ENABLE, &tp->flags);

		if (netif_carrier_ok(netdev)) {
			if (rtl8152_get_speed(tp) & LINK_STATUS) {
				rtl_start_rx(tp);
			} else {
				netif_carrier_off(netdev);
				tp->rtl_ops.disable(tp);
				netif_info(tp, link, netdev, "linking down\n");
			}
		}

		napi_enable(napi);
		clear_bit(SELECTIVE_SUSPEND, &tp->flags);
		smp_mb__after_atomic();

		if (!list_empty(&tp->rx_done))
			napi_schedule(&tp->napi);

		usb_submit_urb(tp->intr_urb, GFP_NOIO);
	} else {
		if (netdev->flags & IFF_UP)
			tp->rtl_ops.autosuspend_en(tp, false);

		clear_bit(SELECTIVE_SUSPEND, &tp->flags);
	}

	return 0;
}
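/*
 * Note the split between the two resume paths: the runtime (autosuspend)
 * resume above only re-arms rx and the interrupt URB, while the system
 * resume below re-runs the full "up" sequence, since the hardware may have
 * lost state or power while the system was asleep.
 */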
static int rtl8152_system_resume(struct r8152 *tp)
{
	struct net_device *netdev = tp->netdev;

	netif_device_attach(netdev);

	if (netif_running(netdev) && (netdev->flags & IFF_UP)) {
		tp->rtl_ops.up(tp);
		netif_carrier_off(netdev);
		set_bit(WORK_ENABLE, &tp->flags);
		usb_submit_urb(tp->intr_urb, GFP_NOIO);
	}

	/* If the device is RTL8152_INACCESSIBLE here then we should do a
	 * reset. This is important because the usb_lock_device_for_reset()
	 * that happens as a result of usb_queue_reset_device() will silently
	 * fail if the device was suspended or if too much time passed.
	 *
	 * NOTE: The device is locked here so we can directly do the reset.
	 * We don't need usb_lock_device_for_reset() because that's just a
	 * wrapper over device_lock() and device_resume() (which calls us)
	 * does that for us.
	 */
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		usb_reset_device(tp->udev);

	return 0;
}

static int rtl8152_runtime_suspend(struct r8152 *tp)
{
	struct net_device *netdev = tp->netdev;
	int ret = 0;

	if (!tp->rtl_ops.autosuspend_en)
		return -EBUSY;

	set_bit(SELECTIVE_SUSPEND, &tp->flags);
	smp_mb__after_atomic();

	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
		u32 rcr = 0;

		if (netif_carrier_ok(netdev)) {
			u32 ocp_data;

			rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
			ocp_data = rcr & ~RCR_ACPT_ALL;
			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
			rxdy_gated_en(tp, true);
			ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
						 PLA_OOB_CTRL);
			if (!(ocp_data & RXFIFO_EMPTY)) {
				rxdy_gated_en(tp, false);
				ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
				clear_bit(SELECTIVE_SUSPEND, &tp->flags);
				smp_mb__after_atomic();
				ret = -EBUSY;
				goto out1;
			}
		}

		clear_bit(WORK_ENABLE, &tp->flags);
		usb_kill_urb(tp->intr_urb);

		tp->rtl_ops.autosuspend_en(tp, true);

		if (netif_carrier_ok(netdev)) {
			struct napi_struct *napi = &tp->napi;

			napi_disable(napi);
			rtl_stop_rx(tp);
			rxdy_gated_en(tp, false);
			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
			napi_enable(napi);
		}

		if (delay_autosuspend(tp)) {
			rtl8152_runtime_resume(tp);
			ret = -EBUSY;
		}
	}

out1:
	return ret;
}

static int rtl8152_system_suspend(struct r8152 *tp)
{
	struct net_device *netdev = tp->netdev;

	netif_device_detach(netdev);

	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
		struct napi_struct *napi = &tp->napi;

		clear_bit(WORK_ENABLE, &tp->flags);
		usb_kill_urb(tp->intr_urb);
		tasklet_disable(&tp->tx_tl);
		napi_disable(napi);
		cancel_delayed_work_sync(&tp->schedule);
		tp->rtl_ops.down(tp);
		napi_enable(napi);
		tasklet_enable(&tp->tx_tl);
	}
	/* If we're inaccessible here then some of the work that we did to
	 * get the adapter ready for suspend didn't work. Queue up a wakeup
	 * event so we can try again.
	 */
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		pm_wakeup_event(&tp->udev->dev, 0);

	return 0;
}

static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct r8152 *tp = usb_get_intfdata(intf);
	int ret;

	mutex_lock(&tp->control);

	if (PMSG_IS_AUTO(message))
		ret = rtl8152_runtime_suspend(tp);
	else
		ret = rtl8152_system_suspend(tp);

	mutex_unlock(&tp->control);

	return ret;
}

static int rtl8152_resume(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);
	int ret;

	mutex_lock(&tp->control);

	rtl_reset_ocp_base(tp);

	if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
		ret = rtl8152_runtime_resume(tp);
	else
		ret = rtl8152_system_resume(tp);

	mutex_unlock(&tp->control);

	return ret;
}

static int rtl8152_reset_resume(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);

	clear_bit(SELECTIVE_SUSPEND, &tp->flags);
	rtl_reset_ocp_base(tp);
	tp->rtl_ops.init(tp);
	queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
	set_ethernet_addr(tp, true);
	return rtl8152_resume(intf);
}

static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct r8152 *tp = netdev_priv(dev);

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	if (!rtl_can_wakeup(tp)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		mutex_lock(&tp->control);
		wol->supported = WAKE_ANY;
		wol->wolopts = __rtl_get_wol(tp);
		mutex_unlock(&tp->control);
	}

	usb_autopm_put_interface(tp->intf);
}

static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct r8152 *tp = netdev_priv(dev);
	int ret;

	if (!rtl_can_wakeup(tp))
		return -EOPNOTSUPP;

	if (wol->wolopts & ~WAKE_ANY)
		return -EINVAL;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out_set_wol;

	mutex_lock(&tp->control);

	__rtl_set_wol(tp, wol->wolopts);
	tp->saved_wolopts = wol->wolopts & WAKE_ANY;

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out_set_wol:
	return ret;
}

static u32 rtl8152_get_msglevel(struct net_device *dev)
{
	struct r8152 *tp = netdev_priv(dev);

	return tp->msg_enable;
}

static void rtl8152_set_msglevel(struct net_device *dev, u32 value)
{
	struct r8152 *tp = netdev_priv(dev);

	tp->msg_enable = value;
}

static void rtl8152_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *info)
{
	struct r8152 *tp = netdev_priv(netdev);

	strscpy(info->driver, MODULENAME, sizeof(info->driver));
	strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
	if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
		strscpy(info->fw_version, tp->rtl_fw.version,
			sizeof(info->fw_version));
}

static int rtl8152_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	struct r8152 *tp = netdev_priv(netdev);
	int ret;

	if (!tp->mii.mdio_read)
		return -EOPNOTSUPP;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	mutex_lock(&tp->control);

	mii_ethtool_get_link_ksettings(&tp->mii, cmd);

	linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
			 cmd->link_modes.supported, tp->support_2500full);

	if (tp->support_2500full) {
		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
				 cmd->link_modes.advertising,
				 ocp_reg_read(tp, OCP_10GBT_CTRL) &
				 MDIO_AN_10GBT_CTRL_ADV2_5G);
		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
				 cmd->link_modes.lp_advertising,
				 ocp_reg_read(tp, OCP_10GBT_STAT) &
				 MDIO_AN_10GBT_STAT_LP2_5G);

		if (is_speed_2500(rtl8152_get_speed(tp)))
			cmd->base.speed = SPEED_2500;
	}

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}
static int rtl8152_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct r8152 *tp = netdev_priv(dev);
	u32 advertising = 0;
	int ret;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	if (test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_10_HALF;

	if (test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_10_FULL;

	if (test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_100_HALF;

	if (test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_100_FULL;

	if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_1000_HALF;

	if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_1000_FULL;

	if (test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
		     cmd->link_modes.advertising))
		advertising |= RTL_ADVERTISED_2500_FULL;

	mutex_lock(&tp->control);

	ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed,
				cmd->base.duplex, advertising);
	if (!ret) {
		tp->autoneg = cmd->base.autoneg;
		tp->speed = cmd->base.speed;
		tp->duplex = cmd->base.duplex;
		tp->advertising = advertising;
	}

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}

static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"rx_unicast",
	"rx_broadcast",
	"rx_multicast",
	"tx_aborted",
	"tx_underrun",
};

static int rtl8152_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rtl8152_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void rtl8152_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct r8152 *tp = netdev_priv(dev);
	struct tally_counter tally;

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);

	usb_autopm_put_interface(tp->intf);

	data[0] = le64_to_cpu(tally.tx_packets);
	data[1] = le64_to_cpu(tally.rx_packets);
	data[2] = le64_to_cpu(tally.tx_errors);
	data[3] = le32_to_cpu(tally.rx_errors);
	data[4] = le16_to_cpu(tally.rx_missed);
	data[5] = le16_to_cpu(tally.align_errors);
	data[6] = le32_to_cpu(tally.tx_one_collision);
	data[7] = le32_to_cpu(tally.tx_multi_collision);
	data[8] = le64_to_cpu(tally.rx_unicast);
	data[9] = le64_to_cpu(tally.rx_broadcast);
	data[10] = le32_to_cpu(tally.rx_multicast);
	data[11] = le16_to_cpu(tally.tx_aborted);
	data[12] = le16_to_cpu(tally.tx_underrun);
}

static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
		break;
	}
}

static int r8152_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
	u16 val;

	val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	mii_eee_cap1_mod_linkmode_t(eee->supported, val);

	val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	mii_eee_cap1_mod_linkmode_t(eee->advertised, val);

	val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
	mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);

	eee->eee_enabled = tp->eee_en;

	linkmode_and(common, eee->advertised, eee->lp_advertised);
	eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);

	return 0;
}
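/*
 * EEE is reported "active" above only when the local and link-partner
 * advertisements overlap for the speed/duplex currently in use; the setter
 * below just rewrites the local advertisement, and the ethtool entry point
 * further down restarts autonegotiation afterwards.
 */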
static int r8152_set_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
	u16 val = linkmode_to_mii_eee_cap1_t(eee->advertised);

	tp->eee_en = eee->eee_enabled;
	tp->eee_adv = val;

	rtl_eee_enable(tp, tp->eee_en);

	return 0;
}

static int r8153_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
	u16 val;

	val = ocp_reg_read(tp, OCP_EEE_ABLE);
	mii_eee_cap1_mod_linkmode_t(eee->supported, val);

	val = ocp_reg_read(tp, OCP_EEE_ADV);
	mii_eee_cap1_mod_linkmode_t(eee->advertised, val);

	val = ocp_reg_read(tp, OCP_EEE_LPABLE);
	mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);

	eee->eee_enabled = tp->eee_en;

	linkmode_and(common, eee->advertised, eee->lp_advertised);
	eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);

	return 0;
}

static int
rtl_ethtool_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct r8152 *tp = netdev_priv(net);
	int ret;

	if (!tp->rtl_ops.eee_get) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	mutex_lock(&tp->control);

	ret = tp->rtl_ops.eee_get(tp, edata);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}

static int
rtl_ethtool_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct r8152 *tp = netdev_priv(net);
	int ret;

	if (!tp->rtl_ops.eee_set) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	mutex_lock(&tp->control);

	ret = tp->rtl_ops.eee_set(tp, edata);
	if (!ret)
		ret = mii_nway_restart(&tp->mii);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}

static int rtl8152_nway_reset(struct net_device *dev)
{
	struct r8152 *tp = netdev_priv(dev);
	int ret;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		goto out;

	mutex_lock(&tp->control);

	ret = mii_nway_restart(&tp->mii);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

out:
	return ret;
}

static int rtl8152_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coalesce,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct r8152 *tp = netdev_priv(netdev);

	switch (tp->version) {
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_07:
		return -EOPNOTSUPP;
	default:
		break;
	}

	coalesce->rx_coalesce_usecs = tp->coalesce;

	return 0;
}

static int rtl8152_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coalesce,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct r8152 *tp = netdev_priv(netdev);
	int ret;

	switch (tp->version) {
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_07:
		return -EOPNOTSUPP;
	default:
		break;
	}

	if (coalesce->rx_coalesce_usecs > COALESCE_SLOW)
		return -EINVAL;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&tp->control);

	if (tp->coalesce != coalesce->rx_coalesce_usecs) {
		tp->coalesce = coalesce->rx_coalesce_usecs;

		if (netif_running(netdev) && netif_carrier_ok(netdev)) {
			netif_stop_queue(netdev);
			napi_disable(&tp->napi);
			tp->rtl_ops.disable(tp);
			tp->rtl_ops.enable(tp);
			rtl_start_rx(tp);
			clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
			_rtl8152_set_rx_mode(netdev);
			napi_enable(&tp->napi);
			netif_wake_queue(netdev);
		}
	}

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

	return ret;
}

static int rtl8152_get_tunable(struct net_device *netdev,
			       const struct ethtool_tunable *tunable, void *d)
{
	struct r8152 *tp = netdev_priv(netdev);

	switch (tunable->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)d = tp->rx_copybreak;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int rtl8152_set_tunable(struct net_device *netdev,
			       const struct ethtool_tunable *tunable,
			       const void *d)
{
	struct r8152 *tp = netdev_priv(netdev);
	u32 val;

	switch (tunable->id) {
	case ETHTOOL_RX_COPYBREAK:
		val = *(u32 *)d;
		if (val < ETH_ZLEN) {
			netif_err(tp, rx_err, netdev,
				  "Invalid rx copy break value\n");
			return -EINVAL;
		}

		if (tp->rx_copybreak != val) {
			if (netdev->flags & IFF_UP) {
				mutex_lock(&tp->control);
				napi_disable(&tp->napi);
				tp->rx_copybreak = val;
				napi_enable(&tp->napi);
				mutex_unlock(&tp->control);
			} else {
				tp->rx_copybreak = val;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void rtl8152_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	struct r8152 *tp = netdev_priv(netdev);

	ring->rx_max_pending = RTL8152_RX_MAX_PENDING;
	ring->rx_pending = tp->rx_pending;
}

static int rtl8152_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring,
				 struct kernel_ethtool_ringparam *kernel_ring,
				 struct netlink_ext_ack *extack)
{
	struct r8152 *tp = netdev_priv(netdev);

	if (ring->rx_pending < (RTL8152_MAX_RX * 2))
		return -EINVAL;

	if (tp->rx_pending != ring->rx_pending) {
		if (netdev->flags & IFF_UP) {
			mutex_lock(&tp->control);
			napi_disable(&tp->napi);
			tp->rx_pending = ring->rx_pending;
			napi_enable(&tp->napi);
			mutex_unlock(&tp->control);
		} else {
			tp->rx_pending = ring->rx_pending;
		}
	}

	return 0;
}

static void rtl8152_get_pauseparam(struct net_device *netdev,
				   struct ethtool_pauseparam *pause)
{
	struct r8152 *tp = netdev_priv(netdev);
	u16 bmcr, lcladv, rmtadv;
	u8 cap;

	if (usb_autopm_get_interface(tp->intf) < 0)
		return;

	mutex_lock(&tp->control);

	bmcr = r8152_mdio_read(tp, MII_BMCR);
	lcladv = r8152_mdio_read(tp, MII_ADVERTISE);
	rmtadv = r8152_mdio_read(tp, MII_LPA);

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

	if (!(bmcr & BMCR_ANENABLE)) {
		pause->autoneg = 0;
		pause->rx_pause = 0;
		pause->tx_pause = 0;
		return;
	}

	pause->autoneg = 1;

	cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	if (cap & FLOW_CTRL_RX)
		pause->rx_pause = 1;

	if (cap & FLOW_CTRL_TX)
		pause->tx_pause = 1;
}

static int rtl8152_set_pauseparam(struct net_device *netdev,
				  struct ethtool_pauseparam *pause)
{
	struct r8152 *tp = netdev_priv(netdev);
	u16 old, new1;
	u8 cap = 0;
	int ret;

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&tp->control);

	if (pause->autoneg &&
	    !(r8152_mdio_read(tp, MII_BMCR) & BMCR_ANENABLE)) {
		ret = -EINVAL;
		goto out;
	}

	if (pause->rx_pause)
		cap |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		cap |= FLOW_CTRL_TX;

	old = r8152_mdio_read(tp, MII_ADVERTISE);
	new1 = (old & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
	       mii_advertise_flowctrl(cap);
	if (old != new1)
		r8152_mdio_write(tp, MII_ADVERTISE, new1);

out:
	mutex_unlock(&tp->control);
	usb_autopm_put_interface(tp->intf);

	return ret;
}
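/*
 * Pause frames are handled through autonegotiation only: the setter above
 * rewrites the local MII advertisement (the pause and asym-pause bits) and
 * relies on nway to resolve the final rx/tx pause state, which is why a
 * request with pause autoneg on while BMCR autoneg is off is rejected.
 */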
static const struct ethtool_ops ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo = rtl8152_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.nway_reset = rtl8152_nway_reset,
	.get_msglevel = rtl8152_get_msglevel,
	.set_msglevel = rtl8152_set_msglevel,
	.get_wol = rtl8152_get_wol,
	.set_wol = rtl8152_set_wol,
	.get_strings = rtl8152_get_strings,
	.get_sset_count = rtl8152_get_sset_count,
	.get_ethtool_stats = rtl8152_get_ethtool_stats,
	.get_coalesce = rtl8152_get_coalesce,
	.set_coalesce = rtl8152_set_coalesce,
	.get_eee = rtl_ethtool_get_eee,
	.set_eee = rtl_ethtool_set_eee,
	.get_link_ksettings = rtl8152_get_link_ksettings,
	.set_link_ksettings = rtl8152_set_link_ksettings,
	.get_tunable = rtl8152_get_tunable,
	.set_tunable = rtl8152_set_tunable,
	.get_ringparam = rtl8152_get_ringparam,
	.set_ringparam = rtl8152_set_ringparam,
	.get_pauseparam = rtl8152_get_pauseparam,
	.set_pauseparam = rtl8152_set_pauseparam,
	.get_ts_info = ethtool_op_get_ts_info,
};

static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	struct r8152 *tp = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(rq);
	int res;

	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return -ENODEV;

	res = usb_autopm_get_interface(tp->intf);
	if (res < 0)
		goto out;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = R8152_PHY_ID; /* Internal PHY */
		break;

	case SIOCGMIIREG:
		mutex_lock(&tp->control);
		data->val_out = r8152_mdio_read(tp, data->reg_num);
		mutex_unlock(&tp->control);
		break;

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN)) {
			res = -EPERM;
			break;
		}
		mutex_lock(&tp->control);
		r8152_mdio_write(tp, data->reg_num, data->val_in);
		mutex_unlock(&tp->control);
		break;

	default:
		res = -EOPNOTSUPP;
	}

	usb_autopm_put_interface(tp->intf);

out:
	return res;
}

static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
{
	struct r8152 *tp = netdev_priv(dev);
	int ret;

	switch (tp->version) {
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_07:
		WRITE_ONCE(dev->mtu, new_mtu);
		return 0;
	default:
		break;
	}

	ret = usb_autopm_get_interface(tp->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&tp->control);

	WRITE_ONCE(dev->mtu, new_mtu);

	if (netif_running(dev)) {
		if (tp->rtl_ops.change_mtu)
			tp->rtl_ops.change_mtu(tp);

		if (netif_carrier_ok(dev)) {
			netif_stop_queue(dev);
			napi_disable(&tp->napi);
			tasklet_disable(&tp->tx_tl);
			tp->rtl_ops.disable(tp);
			tp->rtl_ops.enable(tp);
			rtl_start_rx(tp);
			tasklet_enable(&tp->tx_tl);
			napi_enable(&tp->napi);
			rtl8152_set_rx_mode(dev);
			netif_wake_queue(dev);
		}
	}

	mutex_unlock(&tp->control);

	usb_autopm_put_interface(tp->intf);

	return ret;
}

static const struct net_device_ops rtl8152_netdev_ops = {
	.ndo_open = rtl8152_open,
	.ndo_stop = rtl8152_close,
	.ndo_eth_ioctl = rtl8152_ioctl,
	.ndo_start_xmit = rtl8152_start_xmit,
	.ndo_tx_timeout = rtl8152_tx_timeout,
	.ndo_set_features = rtl8152_set_features,
	.ndo_set_rx_mode = rtl8152_set_rx_mode,
	.ndo_set_mac_address = rtl8152_set_mac_address,
	.ndo_change_mtu = rtl8152_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_features_check = rtl8152_features_check,
};

static void rtl8152_unload(struct r8152 *tp)
{
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	if (tp->version != RTL_VER_01)
		r8152_power_cut_en(tp, true);
}

static void rtl8153_unload(struct r8152 *tp)
{
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	r8153_power_cut_en(tp, false);
}

static void rtl8153b_unload(struct r8152 *tp)
{
	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
		return;

	r8153b_power_cut_en(tp, false);
}

static int rtl_ops_init(struct r8152 *tp)
{
	struct rtl_ops *ops = &tp->rtl_ops;
	int ret = 0;

	switch (tp->version) {
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_07:
		ops->init = r8152b_init;
		ops->enable = rtl8152_enable;
		ops->disable = rtl8152_disable;
		ops->up = rtl8152_up;
		ops->down = rtl8152_down;
		ops->unload = rtl8152_unload;
		ops->eee_get = r8152_get_eee;
		ops->eee_set = r8152_set_eee;
		ops->in_nway = rtl8152_in_nway;
		ops->hw_phy_cfg = r8152b_hw_phy_cfg;
		ops->autosuspend_en = rtl_runtime_suspend_enable;
		tp->rx_buf_sz = 16 * 1024;
		tp->eee_en = true;
		tp->eee_adv = MDIO_EEE_100TX;
		break;

	case RTL_VER_03:
	case RTL_VER_04:
	case RTL_VER_05:
	case RTL_VER_06:
		ops->init = r8153_init;
		ops->enable = rtl8153_enable;
		ops->disable = rtl8153_disable;
		ops->up = rtl8153_up;
		ops->down = rtl8153_down;
		ops->unload = rtl8153_unload;
		ops->eee_get = r8153_get_eee;
		ops->eee_set = r8152_set_eee;
		ops->in_nway = rtl8153_in_nway;
		ops->hw_phy_cfg = r8153_hw_phy_cfg;
		ops->autosuspend_en = rtl8153_runtime_enable;
		ops->change_mtu = rtl8153_change_mtu;
		if (tp->udev->speed < USB_SPEED_SUPER)
			tp->rx_buf_sz = 16 * 1024;
		else
			tp->rx_buf_sz = 32 * 1024;
		tp->eee_en = true;
		tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
		break;

	case RTL_VER_08:
	case RTL_VER_09:
		ops->init = r8153b_init;
		ops->enable = rtl8153_enable;
		ops->disable = rtl8153_disable;
		ops->up = rtl8153b_up;
		ops->down = rtl8153b_down;
		ops->unload = rtl8153b_unload;
		ops->eee_get = r8153_get_eee;
		ops->eee_set = r8152_set_eee;
		ops->in_nway = rtl8153_in_nway;
		ops->hw_phy_cfg = r8153b_hw_phy_cfg;
		ops->autosuspend_en = rtl8153b_runtime_enable;
		ops->change_mtu = rtl8153_change_mtu;
		tp->rx_buf_sz = 32 * 1024;
		tp->eee_en = true;
		tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
		break;

	case RTL_VER_11:
		tp->eee_en = true;
		tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
		fallthrough;
	case RTL_VER_10:
		ops->init = r8156_init;
		ops->enable = rtl8156_enable;
		ops->disable = rtl8156_disable;
		ops->up = rtl8156_up;
		ops->down = rtl8156_down;
		ops->unload = rtl8153_unload;
		ops->eee_get = r8153_get_eee;
		ops->eee_set = r8152_set_eee;
		ops->in_nway = rtl8153_in_nway;
		ops->hw_phy_cfg = r8156_hw_phy_cfg;
		ops->autosuspend_en = rtl8156_runtime_enable;
		ops->change_mtu = rtl8156_change_mtu;
		tp->rx_buf_sz = 48 * 1024;
		tp->support_2500full = 1;
		break;

	case RTL_VER_12:
	case RTL_VER_13:
		tp->support_2500full = 1;
		fallthrough;
	case RTL_VER_15:
		tp->eee_en = true;
		tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
		ops->init = r8156b_init;
		ops->enable = rtl8156b_enable;
		ops->disable = rtl8153_disable;
		ops->up = rtl8156_up;
		ops->down = rtl8156_down;
		ops->unload = rtl8153_unload;
		ops->eee_get = r8153_get_eee;
		ops->eee_set = r8152_set_eee;
		ops->in_nway = rtl8153_in_nway;
		ops->hw_phy_cfg = r8156b_hw_phy_cfg;
		ops->autosuspend_en = rtl8156_runtime_enable;
		ops->change_mtu = rtl8156_change_mtu;
		tp->rx_buf_sz = 48 * 1024;
		break;

	case RTL_VER_14:
		ops->init = r8153c_init;
		ops->enable = rtl8153_enable;
		ops->disable = rtl8153_disable;
		ops->up = rtl8153c_up;
		ops->down = rtl8153b_down;
		ops->unload = rtl8153_unload;
		ops->eee_get = r8153_get_eee;
		ops->eee_set = r8152_set_eee;
		ops->in_nway = rtl8153_in_nway;
		ops->hw_phy_cfg = r8153c_hw_phy_cfg;
		ops->autosuspend_en = rtl8153c_runtime_enable;
		ops->change_mtu = rtl8153c_change_mtu;
		tp->rx_buf_sz = 32 * 1024;
		tp->eee_en = true;
		tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
		break;

	default:
		ret = -ENODEV;
		dev_err(&tp->intf->dev, "Unknown Device\n");
		break;
	}

	return ret;
}

#define FIRMWARE_8153A_2	"rtl_nic/rtl8153a-2.fw"
#define FIRMWARE_8153A_3	"rtl_nic/rtl8153a-3.fw"
#define FIRMWARE_8153A_4	"rtl_nic/rtl8153a-4.fw"
#define FIRMWARE_8153B_2	"rtl_nic/rtl8153b-2.fw"
#define FIRMWARE_8153C_1	"rtl_nic/rtl8153c-1.fw"
#define FIRMWARE_8156A_2	"rtl_nic/rtl8156a-2.fw"
#define FIRMWARE_8156B_2	"rtl_nic/rtl8156b-2.fw"

MODULE_FIRMWARE(FIRMWARE_8153A_2);
MODULE_FIRMWARE(FIRMWARE_8153A_3);
MODULE_FIRMWARE(FIRMWARE_8153A_4);
MODULE_FIRMWARE(FIRMWARE_8153B_2);
MODULE_FIRMWARE(FIRMWARE_8153C_1);
MODULE_FIRMWARE(FIRMWARE_8156A_2);
MODULE_FIRMWARE(FIRMWARE_8156B_2);

static int rtl_fw_init(struct r8152 *tp)
{
	struct rtl_fw *rtl_fw = &tp->rtl_fw;

	switch (tp->version) {
	case RTL_VER_04:
		rtl_fw->fw_name = FIRMWARE_8153A_2;
		rtl_fw->pre_fw = r8153_pre_firmware_1;
		rtl_fw->post_fw = r8153_post_firmware_1;
		break;
	case RTL_VER_05:
		rtl_fw->fw_name = FIRMWARE_8153A_3;
		rtl_fw->pre_fw = r8153_pre_firmware_2;
		rtl_fw->post_fw = r8153_post_firmware_2;
		break;
	case RTL_VER_06:
		rtl_fw->fw_name = FIRMWARE_8153A_4;
		rtl_fw->post_fw = r8153_post_firmware_3;
		break;
	case RTL_VER_09:
		rtl_fw->fw_name = FIRMWARE_8153B_2;
		rtl_fw->pre_fw = r8153b_pre_firmware_1;
		rtl_fw->post_fw = r8153b_post_firmware_1;
		break;
	case RTL_VER_11:
		rtl_fw->fw_name = FIRMWARE_8156A_2;
		rtl_fw->post_fw = r8156a_post_firmware_1;
		break;
	case RTL_VER_13:
	case RTL_VER_15:
		rtl_fw->fw_name = FIRMWARE_8156B_2;
		break;
	case RTL_VER_14:
		rtl_fw->fw_name = FIRMWARE_8153C_1;
		rtl_fw->pre_fw = r8153b_pre_firmware_1;
		rtl_fw->post_fw = r8153c_post_firmware_1;
		break;
	default:
		break;
	}

	return 0;
}

static u8 __rtl_get_hw_ver(struct usb_device *udev)
{
	u32 ocp_data = 0;
	__le32 *tmp;
	u8 version;
	int ret;
	int i;

	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return 0;

	/* Retry up to 3 times in case there is a transitory error. We do
	 * this since retrying a read of the version is always safe and this
	 * function doesn't take advantage of r8152_control_msg().
	 */
	for (i = 0; i < 3; i++) {
		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
				      PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
				      USB_CTRL_GET_TIMEOUT);
		if (ret > 0) {
			ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
			break;
		}
	}

	if (i != 0 && ret > 0)
		dev_warn(&udev->dev, "Needed %d retries to read version\n", i);

	kfree(tmp);

	switch (ocp_data) {
	case 0x4c00:
		version = RTL_VER_01;
		break;
	case 0x4c10:
		version = RTL_VER_02;
		break;
	case 0x5c00:
		version = RTL_VER_03;
		break;
	case 0x5c10:
		version = RTL_VER_04;
		break;
	case 0x5c20:
		version = RTL_VER_05;
		break;
	case 0x5c30:
		version = RTL_VER_06;
		break;
	case 0x4800:
		version = RTL_VER_07;
		break;
	case 0x6000:
		version = RTL_VER_08;
		break;
	case 0x6010:
		version = RTL_VER_09;
		break;
	case 0x7010:
		version = RTL_TEST_01;
		break;
	case 0x7020:
		version = RTL_VER_10;
		break;
	case 0x7030:
		version = RTL_VER_11;
		break;
	case 0x7400:
		version = RTL_VER_12;
		break;
	case 0x7410:
		version = RTL_VER_13;
		break;
	case 0x6400:
		version = RTL_VER_14;
		break;
	case 0x7420:
		version = RTL_VER_15;
		break;
	default:
		version = RTL_VER_UNKNOWN;
		dev_info(&udev->dev, "Unknown version 0x%04x\n", ocp_data);
		break;
	}

	return version;
}

u8 rtl8152_get_version(struct usb_interface *intf)
{
	u8 version;

	version = __rtl_get_hw_ver(interface_to_usbdev(intf));

	dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);

	return version;
}
EXPORT_SYMBOL_GPL(rtl8152_get_version);

static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
{
	int parent_vendor_id = le16_to_cpu(udev->parent->descriptor.idVendor);
	int product_id = le16_to_cpu(udev->descriptor.idProduct);
	int vendor_id = le16_to_cpu(udev->descriptor.idVendor);

	if (vendor_id == VENDOR_ID_LENOVO) {
		switch (product_id) {
		case DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB:
		case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
		case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
		case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
		case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
		case DEVICE_ID_THINKPAD_USB_C_DONGLE:
		case DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK:
			return 1;
		}
	} else if (vendor_id == VENDOR_ID_REALTEK &&
		   parent_vendor_id == VENDOR_ID_LENOVO) {
		switch (product_id) {
		case 0x8153:
			return 1;
		}
	}

	return 0;
}
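/*
 * For the docks matched above, the platform firmware can supply a
 * pass-through MAC address (the host's own; presumably exposed via ACPI, as
 * with similar MAC pass-through schemes), and the address setup during
 * probe then prefers it over the adapter's burned-in address.
 */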
static int rtl8152_probe_once(struct usb_interface *intf,
			      const struct usb_device_id *id, u8 version)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct r8152 *tp;
	struct net_device *netdev;
	int ret;

	usb_reset_device(udev);
	netdev = alloc_etherdev(sizeof(struct r8152));
	if (!netdev) {
		dev_err(&intf->dev, "Out of memory\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(netdev, &intf->dev);
	tp = netdev_priv(netdev);
	tp->msg_enable = 0x7FFF;

	tp->udev = udev;
	tp->netdev = netdev;
	tp->intf = intf;
	tp->version = version;

	tp->pipe_ctrl_in = usb_rcvctrlpipe(udev, 0);
	tp->pipe_ctrl_out = usb_sndctrlpipe(udev, 0);
	tp->pipe_in = usb_rcvbulkpipe(udev, 1);
	tp->pipe_out = usb_sndbulkpipe(udev, 2);
	tp->pipe_intr = usb_rcvintpipe(udev, 3);

	switch (version) {
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_07:
		tp->mii.supports_gmii = 0;
		break;
	default:
		tp->mii.supports_gmii = 1;
		break;
	}

	ret = rtl_ops_init(tp);
	if (ret)
		goto out;

	rtl_fw_init(tp);

	mutex_init(&tp->control);
	INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
	INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
	tasklet_setup(&tp->tx_tl, bottom_half);
	tasklet_disable(&tp->tx_tl);

	netdev->netdev_ops = &rtl8152_netdev_ops;
	netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;

	netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			    NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM |
			    NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
			      NETIF_F_TSO | NETIF_F_FRAGLIST |
			      NETIF_F_IPV6_CSUM | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
				NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
				NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	if (tp->version == RTL_VER_01) {
		netdev->features &= ~NETIF_F_RXCSUM;
		netdev->hw_features &= ~NETIF_F_RXCSUM;
	}

	tp->lenovo_macpassthru = rtl8152_supports_lenovo_macpassthru(udev);

	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
	    (!strcmp(udev->serial, "000001000000") ||
	     !strcmp(udev->serial, "000002000000"))) {
		dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
		tp->dell_tb_rx_agg_bug = 1;
	}

	netdev->ethtool_ops = &ops;
	netif_set_tso_max_size(netdev, RTL_LIMITED_TSO_SIZE);

	/* MTU range: 68 - 1500 or 9194 */
	netdev->min_mtu = ETH_MIN_MTU;
	switch (tp->version) {
	case RTL_VER_03:
	case RTL_VER_04:
	case RTL_VER_05:
	case RTL_VER_06:
	case RTL_VER_08:
	case RTL_VER_09:
	case RTL_VER_14:
		netdev->max_mtu = size_to_mtu(9 * 1024);
		break;
	case RTL_VER_10:
	case RTL_VER_11:
		netdev->max_mtu = size_to_mtu(15 * 1024);
		break;
	case RTL_VER_12:
	case RTL_VER_13:
	case RTL_VER_15:
		netdev->max_mtu = size_to_mtu(16 * 1024);
		break;
	case RTL_VER_01:
	case RTL_VER_02:
	case RTL_VER_07:
	default:
		netdev->max_mtu = ETH_DATA_LEN;
		break;
	}

	tp->mii.dev = netdev;
	tp->mii.mdio_read = read_mii_word;
	tp->mii.mdio_write = write_mii_word;
	tp->mii.phy_id_mask = 0x3f;
	tp->mii.reg_num_mask = 0x1f;
	tp->mii.phy_id = R8152_PHY_ID;

	tp->autoneg = AUTONEG_ENABLE;
	tp->speed = SPEED_100;
	tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL |
			  RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL;
	if (tp->mii.supports_gmii) {
		if (tp->support_2500full &&
		    tp->udev->speed >= USB_SPEED_SUPER) {
			tp->speed = SPEED_2500;
			tp->advertising |= RTL_ADVERTISED_2500_FULL;
		} else {
			tp->speed = SPEED_1000;
		}
		tp->advertising |= RTL_ADVERTISED_1000_FULL;
	}
	tp->duplex = DUPLEX_FULL;

	tp->rx_copybreak = RTL8152_RXFG_HEADSZ;
	tp->rx_pending = 10 * RTL8152_MAX_RX;

	intf->needs_remote_wakeup = 1;

	if (!rtl_can_wakeup(tp))
		__rtl_set_wol(tp, 0);
	else
		tp->saved_wolopts = __rtl_get_wol(tp);

	tp->rtl_ops.init(tp);
#if IS_BUILTIN(CONFIG_USB_RTL8152)
	/* Retry in case request_firmware() is not ready yet. */
	tp->rtl_fw.retry = true;
#endif
	queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
	set_ethernet_addr(tp, false);

	usb_set_intfdata(intf, tp);

	netif_napi_add(netdev, &tp->napi, r8152_poll);

	ret = register_netdev(netdev);
	if (ret != 0) {
		dev_err(&intf->dev, "couldn't register the device\n");
		goto out1;
	}

	if (tp->saved_wolopts)
		device_set_wakeup_enable(&udev->dev, true);
	else
		device_set_wakeup_enable(&udev->dev, false);

	/* If we saw a control transfer error while probing then we may
	 * want to try probe() again. Consider this an error.
	 */
	if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
		goto out2;

	set_bit(PROBED_WITH_NO_ERRORS, &tp->flags);
	netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);

	return 0;

out2:
	unregister_netdev(netdev);

out1:
	tasklet_kill(&tp->tx_tl);
	cancel_delayed_work_sync(&tp->hw_phy_work);
	if (tp->rtl_ops.unload)
		tp->rtl_ops.unload(tp);
	rtl8152_release_firmware(tp);
	usb_set_intfdata(intf, NULL);

out:
	if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
		ret = -EAGAIN;

	free_netdev(netdev);
	return ret;
}

#define RTL8152_PROBE_TRIES	3

static int rtl8152_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	u8 version;
	int ret;
	int i;

	if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
		return -ENODEV;

	if (!rtl_check_vendor_ok(intf))
		return -ENODEV;

	version = rtl8152_get_version(intf);
	if (version == RTL_VER_UNKNOWN)
		return -ENODEV;

	for (i = 0; i < RTL8152_PROBE_TRIES; i++) {
		ret = rtl8152_probe_once(intf, id, version);
		if (ret != -EAGAIN)
			break;
	}
	if (ret == -EAGAIN) {
		dev_err(&intf->dev,
			"r8152 failed probe after %d tries; giving up\n", i);
		return -ENODEV;
	}

	return ret;
}

static void rtl8152_disconnect(struct usb_interface *intf)
{
	struct r8152 *tp = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	if (tp) {
		rtl_set_unplug(tp);

		unregister_netdev(tp->netdev);
		tasklet_kill(&tp->tx_tl);
		cancel_delayed_work_sync(&tp->hw_phy_work);
		if (tp->rtl_ops.unload)
			tp->rtl_ops.unload(tp);
		rtl8152_release_firmware(tp);
		free_netdev(tp->netdev);
	}
}

/* table of devices that work with this driver */
static const struct usb_device_id rtl8152_table[] = {
	/* Realtek */
	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8050) },
	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8053) },
	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8152) },
	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8153) },
	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8155) },
	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x8156) },

	/* Microsoft */
	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab) },
	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6) },
	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) },
	{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) },
	{ USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) },

	/* Lenovo */
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3098) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0xa359) },
	{ USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) },

	{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
	{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
	{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
	{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0602) },
	{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
	{ USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
	{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
	{}
};

MODULE_DEVICE_TABLE(usb, rtl8152_table);
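/*
 * The same ID table is reused by the cfgselector device driver further
 * down, so configuration selection is only attempted on devices this
 * driver actually claims.
 */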
static struct usb_driver rtl8152_driver = {
	.name = MODULENAME,
	.id_table = rtl8152_table,
	.probe = rtl8152_probe,
	.disconnect = rtl8152_disconnect,
	.suspend = rtl8152_suspend,
	.resume = rtl8152_resume,
	.reset_resume = rtl8152_reset_resume,
	.pre_reset = rtl8152_pre_reset,
	.post_reset = rtl8152_post_reset,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev)
{
	struct usb_host_config *c;
	int i, num_configs;

	/* Switch the device to vendor mode, if and only if the vendor mode
	 * driver supports it.
	 */
	if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
		return -ENODEV;

	/* The vendor mode is not always config #1, so we need to find it. */
	c = udev->config;
	num_configs = udev->descriptor.bNumConfigurations;
	for (i = 0; i < num_configs; (i++, c++)) {
		struct usb_interface_descriptor *desc = NULL;

		if (!c->desc.bNumInterfaces)
			continue;
		desc = &c->intf_cache[0]->altsetting->desc;
		if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC)
			break;
	}

	if (i == num_configs)
		return -ENODEV;

	return c->desc.bConfigurationValue;
}

static struct usb_device_driver rtl8152_cfgselector_driver = {
	.name = MODULENAME "-cfgselector",
	.choose_configuration = rtl8152_cfgselector_choose_configuration,
	.id_table = rtl8152_table,
	.generic_subclass = 1,
	.supports_autosuspend = 1,
};

static int __init rtl8152_driver_init(void)
{
	int ret;

	ret = usb_register_device_driver(&rtl8152_cfgselector_driver,
					 THIS_MODULE);
	if (ret)
		return ret;

	ret = usb_register(&rtl8152_driver);
	if (ret)
		usb_deregister_device_driver(&rtl8152_cfgselector_driver);

	return ret;
}

static void __exit rtl8152_driver_exit(void)
{
	usb_deregister(&rtl8152_driver);
	usb_deregister_device_driver(&rtl8152_cfgselector_driver);
}

module_init(rtl8152_driver_init);
module_exit(rtl8152_driver_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/msdos/namei.c * * Written 1992,1993 by Werner Almesberger * Hidden files 1995 by Albert Cahalan <albert@ccs.neu.edu> <adc@coe.neu.edu> * Rewritten for constant inumbers 1999 by Al Viro */ #include <linux/module.h> #include <linux/iversion.h> #include "fat.h" /* Characters that are undesirable in an MS-DOS file name */ static unsigned char bad_chars[] = "*?<>|\""; static unsigned char bad_if_strict[] = "+=,; "; /***** Formats an MS-DOS file name. Rejects invalid names.
*/ static int msdos_format_name(const unsigned char *name, int len, unsigned char *res, struct fat_mount_options *opts) /* * name is the proposed name, len is its length, res is * the resulting name, opts->name_check is either (r)elaxed, * (n)ormal or (s)trict, opts->dotsOK allows dots at the * beginning of name (for hidden files) */ { unsigned char *walk; unsigned char c; int space; if (name[0] == '.') { /* dotfile because . and .. already done */ if (opts->dotsOK) { /* Get rid of dot - test for it elsewhere */ name++; len--; } else return -EINVAL; } /* * disallow names that _really_ start with a dot */ space = 1; c = 0; for (walk = res; len && walk - res < 8; walk++) { c = *name++; len--; if (opts->name_check != 'r' && strchr(bad_chars, c)) return -EINVAL; if (opts->name_check == 's' && strchr(bad_if_strict, c)) return -EINVAL; if (c >= 'A' && c <= 'Z' && opts->name_check == 's') return -EINVAL; if (c < ' ' || c == ':' || c == '\\') return -EINVAL; /* * 0xE5 is legal as a first character, but we must substitute * 0x05 because 0xE5 marks deleted files. Yes, DOS really * does this. * It seems that Microsoft hacked DOS to support non-US * characters after the 0xE5 character was already in use to * mark deleted files. */ if ((res == walk) && (c == 0xE5)) c = 0x05; if (c == '.') break; space = (c == ' '); *walk = (!opts->nocase && c >= 'a' && c <= 'z') ? c - 32 : c; } if (space) return -EINVAL; if (opts->name_check == 's' && len && c != '.') { c = *name++; len--; if (c != '.') return -EINVAL; } while (c != '.' && len--) c = *name++; if (c == '.') { while (walk - res < 8) *walk++ = ' '; while (len > 0 && walk - res < MSDOS_NAME) { c = *name++; len--; if (opts->name_check != 'r' && strchr(bad_chars, c)) return -EINVAL; if (opts->name_check == 's' && strchr(bad_if_strict, c)) return -EINVAL; if (c < ' ' || c == ':' || c == '\\') return -EINVAL; if (c == '.') { if (opts->name_check == 's') return -EINVAL; break; } if (c >= 'A' && c <= 'Z' && opts->name_check == 's') return -EINVAL; space = c == ' '; if (!opts->nocase && c >= 'a' && c <= 'z') *walk++ = c - 32; else *walk++ = c; } if (space) return -EINVAL; if (opts->name_check == 's' && len) return -EINVAL; } while (walk - res < MSDOS_NAME) *walk++ = ' '; return 0; } /***** Locates a directory entry. Uses unformatted name. */ static int msdos_find(struct inode *dir, const unsigned char *name, int len, struct fat_slot_info *sinfo) { struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); unsigned char msdos_name[MSDOS_NAME]; int err; err = msdos_format_name(name, len, msdos_name, &sbi->options); if (err) return -ENOENT; err = fat_scan(dir, msdos_name, sinfo); if (!err && sbi->options.dotsOK) { if (name[0] == '.') { if (!(sinfo->de->attr & ATTR_HIDDEN)) err = -ENOENT; } else { if (sinfo->de->attr & ATTR_HIDDEN) err = -ENOENT; } if (err) brelse(sinfo->bh); } return err; } /* * Compute the hash for the msdos name corresponding to the dentry. * Note: if the name is invalid, we leave the hash code unchanged so * that the existing dentry can be used. The msdos fs routines will * return ENOENT or EINVAL as appropriate. */ static int msdos_hash(const struct dentry *dentry, struct qstr *qstr) { struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options; unsigned char msdos_name[MSDOS_NAME]; int error; error = msdos_format_name(qstr->name, qstr->len, msdos_name, options); if (!error) qstr->hash = full_name_hash(dentry, msdos_name, MSDOS_NAME); return 0; } /* * Compare two msdos names. 
If either of the names is invalid, * we fall back to doing the standard name comparison. */ static int msdos_cmp(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options; unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME]; int error; error = msdos_format_name(name->name, name->len, a_msdos_name, options); if (error) goto old_compare; error = msdos_format_name(str, len, b_msdos_name, options); if (error) goto old_compare; error = memcmp(a_msdos_name, b_msdos_name, MSDOS_NAME); out: return error; old_compare: error = 1; if (name->len == len) error = memcmp(name->name, str, len); goto out; } static const struct dentry_operations msdos_dentry_operations = { .d_hash = msdos_hash, .d_compare = msdos_cmp, }; /* * AV. Wrappers for FAT sb operations. Is it wise? */ /***** Get inode using directory and name */ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; struct inode *inode; int err; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); switch (err) { case -ENOENT: inode = NULL; break; case 0: inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); break; default: inode = ERR_PTR(err); } mutex_unlock(&MSDOS_SB(sb)->s_lock); return d_splice_alias(inode, dentry); } /***** Creates a directory entry (name is already formatted). */ static int msdos_add_entry(struct inode *dir, const unsigned char *name, int is_dir, int is_hid, int cluster, struct timespec64 *ts, struct fat_slot_info *sinfo) { struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb); struct msdos_dir_entry de; __le16 time, date; int err; memcpy(de.name, name, MSDOS_NAME); de.attr = is_dir ? ATTR_DIR : ATTR_ARCH; if (is_hid) de.attr |= ATTR_HIDDEN; de.lcase = 0; fat_time_unix2fat(sbi, ts, &time, &date, NULL); de.cdate = de.adate = 0; de.ctime = 0; de.ctime_cs = 0; de.time = time; de.date = date; fat_set_start(&de, cluster); de.size = 0; err = fat_add_entries(dir, &de, 1, sinfo); if (err) return err; fat_truncate_time(dir, ts, S_CTIME|S_MTIME); if (IS_DIRSYNC(dir)) (void)fat_sync_inode(dir); else mark_inode_dirty(dir); return 0; } /***** Create a file */ static int msdos_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct super_block *sb = dir->i_sb; struct inode *inode = NULL; struct fat_slot_info sinfo; struct timespec64 ts; unsigned char msdos_name[MSDOS_NAME]; int err, is_hid; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_format_name(dentry->d_name.name, dentry->d_name.len, msdos_name, &MSDOS_SB(sb)->options); if (err) goto out; is_hid = (dentry->d_name.name[0] == '.') && (msdos_name[0] != '.'); /* Have to do it due to foo vs. .foo conflicts */ if (!fat_scan(dir, msdos_name, &sinfo)) { brelse(sinfo.bh); err = -EINVAL; goto out; } ts = current_time(dir); err = msdos_add_entry(dir, msdos_name, 0, is_hid, 0, &ts, &sinfo); if (err) goto out; inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out; } fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME); /* timestamp is already written, so mark_inode_dirty() is unneeded.
*/ d_instantiate(dentry, inode); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); if (!err) err = fat_flush_inodes(sb, dir, inode); return err; } /***** Remove a directory */ static int msdos_rmdir(struct inode *dir, struct dentry *dentry) { struct super_block *sb = dir->i_sb; struct inode *inode = d_inode(dentry); struct fat_slot_info sinfo; int err; mutex_lock(&MSDOS_SB(sb)->s_lock); err = fat_dir_empty(inode); if (err) goto out; err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); if (err) goto out; err = fat_remove_entries(dir, &sinfo); /* and releases bh */ if (err) goto out; drop_nlink(dir); clear_nlink(inode); fat_truncate_time(inode, NULL, S_CTIME); fat_detach(inode); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); if (!err) err = fat_flush_inodes(sb, dir, inode); return err; } /***** Make a directory */ static struct dentry *msdos_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct super_block *sb = dir->i_sb; struct fat_slot_info sinfo; struct inode *inode; unsigned char msdos_name[MSDOS_NAME]; struct timespec64 ts; int err, is_hid, cluster; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_format_name(dentry->d_name.name, dentry->d_name.len, msdos_name, &MSDOS_SB(sb)->options); if (err) goto out; is_hid = (dentry->d_name.name[0] == '.') && (msdos_name[0] != '.'); /* foo vs .foo situation */ if (!fat_scan(dir, msdos_name, &sinfo)) { brelse(sinfo.bh); err = -EINVAL; goto out; } ts = current_time(dir); cluster = fat_alloc_new_dir(dir, &ts); if (cluster < 0) { err = cluster; goto out; } err = msdos_add_entry(dir, msdos_name, 1, is_hid, cluster, &ts, &sinfo); if (err) goto out_free; inc_nlink(dir); inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(sinfo.bh); if (IS_ERR(inode)) { err = PTR_ERR(inode); /* the directory was completed, just return an error */ goto out; } set_nlink(inode, 2); fat_truncate_time(inode, &ts, S_ATIME|S_CTIME|S_MTIME); /* timestamp is already written, so mark_inode_dirty() is unneeded.
*/ d_instantiate(dentry, inode); mutex_unlock(&MSDOS_SB(sb)->s_lock); fat_flush_inodes(sb, dir, inode); return NULL; out_free: fat_free_clusters(dir, cluster); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); return ERR_PTR(err); } /***** Unlink a file */ static int msdos_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct fat_slot_info sinfo; int err; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); if (err) goto out; err = fat_remove_entries(dir, &sinfo); /* and releases bh */ if (err) goto out; clear_nlink(inode); fat_truncate_time(inode, NULL, S_CTIME); fat_detach(inode); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); if (!err) err = fat_flush_inodes(sb, dir, inode); return err; } static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name, struct dentry *old_dentry, struct inode *new_dir, unsigned char *new_name, struct dentry *new_dentry, int is_hid) { struct buffer_head *dotdot_bh; struct msdos_dir_entry *dotdot_de; struct inode *old_inode, *new_inode; struct fat_slot_info old_sinfo, sinfo; struct timespec64 ts; loff_t new_i_pos; int err, old_attrs, is_dir, update_dotdot, corrupt = 0; old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; old_inode = d_inode(old_dentry); new_inode = d_inode(new_dentry); err = fat_scan(old_dir, old_name, &old_sinfo); if (err) { err = -EIO; goto out; } is_dir = S_ISDIR(old_inode->i_mode); update_dotdot = (is_dir && old_dir != new_dir); if (update_dotdot) { if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de)) { err = -EIO; goto out; } } old_attrs = MSDOS_I(old_inode)->i_attrs; err = fat_scan(new_dir, new_name, &sinfo); if (!err) { if (!new_inode) { /* "foo" -> ".foo" case. just change the ATTR_HIDDEN */ if (sinfo.de != old_sinfo.de) { err = -EINVAL; goto out; } if (is_hid) MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN; else MSDOS_I(old_inode)->i_attrs &= ~ATTR_HIDDEN; if (IS_DIRSYNC(old_dir)) { err = fat_sync_inode(old_inode); if (err) { MSDOS_I(old_inode)->i_attrs = old_attrs; goto out; } } else mark_inode_dirty(old_inode); inode_inc_iversion(old_dir); fat_truncate_time(old_dir, NULL, S_CTIME|S_MTIME); if (IS_DIRSYNC(old_dir)) (void)fat_sync_inode(old_dir); else mark_inode_dirty(old_dir); goto out; } } ts = current_time(old_inode); if (new_inode) { if (err) goto out; if (is_dir) { err = fat_dir_empty(new_inode); if (err) goto out; } new_i_pos = MSDOS_I(new_inode)->i_pos; fat_detach(new_inode); } else { err = msdos_add_entry(new_dir, new_name, is_dir, is_hid, 0, &ts, &sinfo); if (err) goto out; new_i_pos = sinfo.i_pos; } inode_inc_iversion(new_dir); fat_detach(old_inode); fat_attach(old_inode, new_i_pos); if (is_hid) MSDOS_I(old_inode)->i_attrs |= ATTR_HIDDEN; else MSDOS_I(old_inode)->i_attrs &= ~ATTR_HIDDEN; if (IS_DIRSYNC(new_dir)) { err = fat_sync_inode(old_inode); if (err) goto error_inode; } else mark_inode_dirty(old_inode); if (update_dotdot) { fat_set_start(dotdot_de, MSDOS_I(new_dir)->i_logstart); mark_buffer_dirty_inode(dotdot_bh, old_inode); if (IS_DIRSYNC(new_dir)) { err = sync_dirty_buffer(dotdot_bh); if (err) goto error_dotdot; } drop_nlink(old_dir); if (!new_inode) inc_nlink(new_dir); } err = fat_remove_entries(old_dir, &old_sinfo); /* and releases bh */ old_sinfo.bh = NULL; if (err) goto error_dotdot; inode_inc_iversion(old_dir); fat_truncate_time(old_dir, &ts, S_CTIME|S_MTIME); if (IS_DIRSYNC(old_dir)) (void)fat_sync_inode(old_dir); else mark_inode_dirty(old_dir); if (new_inode) { 
drop_nlink(new_inode); if (is_dir) drop_nlink(new_inode); fat_truncate_time(new_inode, &ts, S_CTIME); } out: brelse(sinfo.bh); brelse(dotdot_bh); brelse(old_sinfo.bh); return err; error_dotdot: /* data cluster is shared, serious corruption */ corrupt = 1; if (update_dotdot) { fat_set_start(dotdot_de, MSDOS_I(old_dir)->i_logstart); mark_buffer_dirty_inode(dotdot_bh, old_inode); corrupt |= sync_dirty_buffer(dotdot_bh); } error_inode: fat_detach(old_inode); fat_attach(old_inode, old_sinfo.i_pos); MSDOS_I(old_inode)->i_attrs = old_attrs; if (new_inode) { fat_attach(new_inode, new_i_pos); if (corrupt) corrupt |= fat_sync_inode(new_inode); } else { /* * If new entry was not sharing the data cluster, it * shouldn't be serious corruption. */ int err2 = fat_remove_entries(new_dir, &sinfo); if (corrupt) corrupt |= err2; sinfo.bh = NULL; } if (corrupt < 0) { fat_fs_error(new_dir->i_sb, "%s: Filesystem corrupted (i_pos %lld)", __func__, sinfo.i_pos); } goto out; } /***** Rename, a wrapper for rename_same_dir & rename_diff_dir */ static int msdos_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct super_block *sb = old_dir->i_sb; unsigned char old_msdos_name[MSDOS_NAME], new_msdos_name[MSDOS_NAME]; int err, is_hid; if (flags & ~RENAME_NOREPLACE) return -EINVAL; mutex_lock(&MSDOS_SB(sb)->s_lock); err = msdos_format_name(old_dentry->d_name.name, old_dentry->d_name.len, old_msdos_name, &MSDOS_SB(old_dir->i_sb)->options); if (err) goto out; err = msdos_format_name(new_dentry->d_name.name, new_dentry->d_name.len, new_msdos_name, &MSDOS_SB(new_dir->i_sb)->options); if (err) goto out; is_hid = (new_dentry->d_name.name[0] == '.') && (new_msdos_name[0] != '.'); err = do_msdos_rename(old_dir, old_msdos_name, old_dentry, new_dir, new_msdos_name, new_dentry, is_hid); out: mutex_unlock(&MSDOS_SB(sb)->s_lock); if (!err) err = fat_flush_inodes(sb, old_dir, new_dir); return err; } static const struct inode_operations msdos_dir_inode_operations = { .create = msdos_create, .lookup = msdos_lookup, .unlink = msdos_unlink, .mkdir = msdos_mkdir, .rmdir = msdos_rmdir, .rename = msdos_rename, .setattr = fat_setattr, .getattr = fat_getattr, .update_time = fat_update_time, }; static void setup(struct super_block *sb) { MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations; set_default_d_op(sb, &msdos_dentry_operations); sb->s_flags |= SB_NOATIME; } static int msdos_fill_super(struct super_block *sb, struct fs_context *fc) { return fat_fill_super(sb, fc, setup); } static int msdos_get_tree(struct fs_context *fc) { return get_tree_bdev(fc, msdos_fill_super); } static int msdos_parse_param(struct fs_context *fc, struct fs_parameter *param) { return fat_parse_param(fc, param, false); } static const struct fs_context_operations msdos_context_ops = { .parse_param = msdos_parse_param, .get_tree = msdos_get_tree, .reconfigure = fat_reconfigure, .free = fat_free_fc, }; static int msdos_init_fs_context(struct fs_context *fc) { int err; /* Initialize with is_vfat == false */ err = fat_init_fs_context(fc, false); if (err) return err; fc->ops = &msdos_context_ops; return 0; } static struct file_system_type msdos_fs_type = { .owner = THIS_MODULE, .name = "msdos", .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, .init_fs_context = msdos_init_fs_context, .parameters = fat_param_spec, }; MODULE_ALIAS_FS("msdos"); static int __init init_msdos_fs(void) { return register_filesystem(&msdos_fs_type); } static void 
__exit exit_msdos_fs(void) { unregister_filesystem(&msdos_fs_type); } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Werner Almesberger"); MODULE_DESCRIPTION("MS-DOS filesystem support"); module_init(init_msdos_fs) module_exit(exit_msdos_fs)
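/*
 * Editor's sketch (not part of the filesystem): msdos_format_name() above
 * produces the classic on-disk 8.3 layout: up to eight name bytes plus
 * three extension bytes, upper-cased and space-padded into an 11-byte
 * field, with a leading 0xE5 remapped to 0x05 because 0xE5 marks deleted
 * directory entries. A simplified userspace illustration of just that
 * layout step (format_83() is a hypothetical helper; all of the validity
 * checking done by msdos_format_name() is omitted):
 */
#include <ctype.h>
#include <string.h>

static void format_83(const char *name, unsigned char out[11])
{
	const char *dot = strrchr(name, '.');
	size_t i, j = 0;

	memset(out, ' ', 11);		/* space-pad both fields */

	/* Base name: at most 8 bytes, stopping at the final dot. */
	for (i = 0; name[i] && (!dot || name + i < dot) && j < 8; i++)
		out[j++] = toupper((unsigned char)name[i]);

	/* Extension: at most 3 bytes after the final dot. */
	if (dot)
		for (i = 1, j = 8; dot[i] && j < 11; i++)
			out[j++] = toupper((unsigned char)dot[i]);

	if (out[0] == 0xE5)		/* 0xE5 marks a deleted entry */
		out[0] = 0x05;		/* as in msdos_format_name() */
}

/* For example, "foo.txt" becomes the 11 bytes "FOO     TXT". */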
// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/cpufreq/cpufreq.c * * Copyright (C) 2001 Russell King * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org> * * Oct 2005 - Ashok Raj <ashok.raj@intel.com> * Added handling for CPU hotplug * Feb 2006 - Jacob Shin <jacob.shin@amd.com> * Fix handling for CPU hotplug -- affected CPUs */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpu_cooling.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/init.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/pm_qos.h> #include <linux/slab.h> #include <linux/string_choices.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/tick.h> #include <linux/units.h> #include <trace/events/power.h> static LIST_HEAD(cpufreq_policy_list); /* Macros to iterate over CPU policies */ #define for_each_suitable_policy(__policy, __active) \ list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \ if ((__active) == !policy_is_inactive(__policy)) #define for_each_active_policy(__policy) \ for_each_suitable_policy(__policy, true) #define for_each_inactive_policy(__policy) \ for_each_suitable_policy(__policy, false) /* Iterate over governors */ static LIST_HEAD(cpufreq_governor_list); #define for_each_governor(__governor) \ list_for_each_entry(__governor, &cpufreq_governor_list, governor_list) static char default_governor[CPUFREQ_NAME_LEN]; /* * The "cpufreq driver" - the arch- or hardware-dependent low * level driver of CPUFreq support, and its spinlock. This lock * also protects the cpufreq_cpu_data array. */ static struct cpufreq_driver *cpufreq_driver; static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); static DEFINE_RWLOCK(cpufreq_driver_lock); static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance); bool cpufreq_supports_freq_invariance(void) { return static_branch_likely(&cpufreq_freq_invariance); } /* Flag to suspend/resume CPUFreq governors */ static bool cpufreq_suspended; static inline bool has_target(void) { return cpufreq_driver->target_index || cpufreq_driver->target; } bool has_target_index(void) { return !!cpufreq_driver->target_index; } /* internal prototypes */ static unsigned int __cpufreq_get(struct cpufreq_policy *policy); static int cpufreq_init_governor(struct cpufreq_policy *policy); static void cpufreq_exit_governor(struct cpufreq_policy *policy); static void cpufreq_governor_limits(struct cpufreq_policy *policy); static int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_governor *new_gov, unsigned int new_pol); static bool cpufreq_boost_supported(void); static int cpufreq_boost_trigger_state(int state); /* * Two notifier lists: the "policy" list is involved in the * validation process for a new CPU frequency policy; the * "transition" list for kernel code that needs to handle * changes to devices when the CPU clock speed changes. * The mutex locks both lists.
*/ static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list); static int off __read_mostly; static int cpufreq_disabled(void) { return off; } void disable_cpufreq(void) { off = 1; } EXPORT_SYMBOL_GPL(disable_cpufreq); static DEFINE_MUTEX(cpufreq_governor_mutex); bool have_governor_per_policy(void) { return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY); } EXPORT_SYMBOL_GPL(have_governor_per_policy); static struct kobject *cpufreq_global_kobject; struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) { if (have_governor_per_policy()) return &policy->kobj; else return cpufreq_global_kobject; } EXPORT_SYMBOL_GPL(get_governor_parent_kobj); static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { struct kernel_cpustat kcpustat; u64 cur_wall_time; u64 idle_time; u64 busy_time; cur_wall_time = jiffies64_to_nsecs(get_jiffies_64()); kcpustat_cpu_fetch(&kcpustat, cpu); busy_time = kcpustat.cpustat[CPUTIME_USER]; busy_time += kcpustat.cpustat[CPUTIME_SYSTEM]; busy_time += kcpustat.cpustat[CPUTIME_IRQ]; busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ]; busy_time += kcpustat.cpustat[CPUTIME_STEAL]; busy_time += kcpustat.cpustat[CPUTIME_NICE]; idle_time = cur_wall_time - busy_time; if (wall) *wall = div_u64(cur_wall_time, NSEC_PER_USEC); return div_u64(idle_time, NSEC_PER_USEC); } u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) { u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL); if (idle_time == -1ULL) return get_cpu_idle_time_jiffy(cpu, wall); else if (!io_busy) idle_time += get_cpu_iowait_time_us(cpu, wall); return idle_time; } EXPORT_SYMBOL_GPL(get_cpu_idle_time); /* * This is a generic cpufreq init() routine which can be used by cpufreq * drivers of SMP systems. It will do the following: * - validate & show the freq table passed * - set the policy's transition latency * - fill policy->cpus with all possible CPUs */ void cpufreq_generic_init(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int transition_latency) { policy->freq_table = table; policy->cpuinfo.transition_latency = transition_latency; /* * The driver only supports the SMP configuration where all processors * share the clock and voltage. */ cpumask_setall(policy->cpus); } EXPORT_SYMBOL_GPL(cpufreq_generic_init); struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu) { struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL; } EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw); unsigned int cpufreq_generic_get(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); if (!policy || IS_ERR(policy->clk)) { pr_err("%s: No %s associated to cpu: %d\n", __func__, policy ? "clk" : "policy", cpu); return 0; } return clk_get_rate(policy->clk) / 1000; } EXPORT_SYMBOL_GPL(cpufreq_generic_get); /** * cpufreq_cpu_get - Return policy for a CPU and mark it as busy. * @cpu: CPU to find the policy for. * * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment * the kobject reference counter of that policy. Return a valid policy on * success or NULL on failure. * * The policy returned by this function has to be released with the help of * cpufreq_cpu_put() to balance its kobject reference counter properly.
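* * Illustrative usage of the get/put pairing (a sketch, not from the source): * * policy = cpufreq_cpu_get(cpu); * if (policy) { * ... read or update the policy ... * cpufreq_cpu_put(policy); * }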
*/ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) { struct cpufreq_policy *policy = NULL; unsigned long flags; if (WARN_ON(cpu >= nr_cpu_ids)) return NULL; /* get the cpufreq driver */ read_lock_irqsave(&cpufreq_driver_lock, flags); if (cpufreq_driver) { /* get the CPU */ policy = cpufreq_cpu_get_raw(cpu); if (policy) kobject_get(&policy->kobj); } read_unlock_irqrestore(&cpufreq_driver_lock, flags); return policy; } EXPORT_SYMBOL_GPL(cpufreq_cpu_get); /** * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy. * @policy: cpufreq policy returned by cpufreq_cpu_get(). */ void cpufreq_cpu_put(struct cpufreq_policy *policy) { kobject_put(&policy->kobj); } EXPORT_SYMBOL_GPL(cpufreq_cpu_put); /********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ /** * adjust_jiffies - Adjust the system "loops_per_jiffy". * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE. * @ci: Frequency change information. * * This function alters the system "loops_per_jiffy" for the clock * speed change. Note that loops_per_jiffy cannot be updated on SMP * systems as each CPU might be scaled differently. So, use the arch * per-CPU loops_per_jiffy value wherever possible. */ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) { #ifndef CONFIG_SMP static unsigned long l_p_j_ref; static unsigned int l_p_j_ref_freq; if (ci->flags & CPUFREQ_CONST_LOOPS) return; if (!l_p_j_ref_freq) { l_p_j_ref = loops_per_jiffy; l_p_j_ref_freq = ci->old; pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); } if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n", loops_per_jiffy, ci->new); } #endif } /** * cpufreq_notify_transition - Notify frequency transition and adjust jiffies. * @policy: cpufreq policy the transition applies to. * @freqs: contains details of the frequency update. * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE. * * This function calls the transition notifiers and adjust_jiffies(). * * It is called twice on all CPU frequency changes that have external effects. */ static void cpufreq_notify_transition(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, unsigned int state) { int cpu; BUG_ON(irqs_disabled()); if (cpufreq_disabled()) return; freqs->policy = policy; freqs->flags = cpufreq_driver->flags; pr_debug("notification %u of frequency transition to %u kHz\n", state, freqs->new); switch (state) { case CPUFREQ_PRECHANGE: /* * Detect if the driver reported a value as "old frequency" * which is not equal to what the cpufreq core thinks is * "old frequency".
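* In that case the core overrides freqs->old with policy->cur below, so * transition notifiers always see a consistent starting frequency.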
*/ if (policy->cur && policy->cur != freqs->old) { pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n", freqs->old, policy->cur); freqs->old = policy->cur; } srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_PRECHANGE, freqs); adjust_jiffies(CPUFREQ_PRECHANGE, freqs); break; case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new, cpumask_pr_args(policy->cpus)); for_each_cpu(cpu, policy->cpus) trace_cpu_frequency(freqs->new, cpu); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); cpufreq_stats_record_transition(policy, freqs->new); policy->cur = freqs->new; } } /* Do post notifications when there are chances that transition has failed */ static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, int transition_failed) { cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); if (!transition_failed) return; swap(freqs->old, freqs->new); cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); } void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs) { /* * Catch double invocations of _begin() which lead to self-deadlock. * ASYNC_NOTIFICATION drivers are left out because the cpufreq core * doesn't invoke _begin() on their behalf, and hence the chances of * double invocations are very low. Moreover, there are scenarios * where these checks can emit false-positive warnings in these * drivers; so we avoid that by skipping them altogether. */ WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION) && current == policy->transition_task); wait: wait_event(policy->transition_wait, !policy->transition_ongoing); spin_lock(&policy->transition_lock); if (unlikely(policy->transition_ongoing)) { spin_unlock(&policy->transition_lock); goto wait; } policy->transition_ongoing = true; policy->transition_task = current; spin_unlock(&policy->transition_lock); cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); } EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin); void cpufreq_freq_transition_end(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, int transition_failed) { if (WARN_ON(!policy->transition_ongoing)) return; cpufreq_notify_post_transition(policy, freqs, transition_failed); arch_set_freq_scale(policy->related_cpus, policy->cur, arch_scale_freq_ref(policy->cpu)); spin_lock(&policy->transition_lock); policy->transition_ongoing = false; policy->transition_task = NULL; spin_unlock(&policy->transition_lock); wake_up(&policy->transition_wait); } EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end); /* * Fast frequency switching status count. Positive means "enabled", negative * means "disabled" and 0 means "not decided yet". */ static int cpufreq_fast_switch_count; static DEFINE_MUTEX(cpufreq_fast_switch_lock); static void cpufreq_list_transition_notifiers(void) { struct notifier_block *nb; pr_info("Registered transition notifiers:\n"); mutex_lock(&cpufreq_transition_notifier_list.mutex); for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next) pr_info("%pS\n", nb->notifier_call); mutex_unlock(&cpufreq_transition_notifier_list.mutex); } /** * cpufreq_enable_fast_switch - Enable fast frequency switching for policy. * @policy: cpufreq policy to enable fast frequency switching for. * * Try to enable fast frequency switching for @policy. 
* * The attempt will fail if there is at least one transition notifier registered * at this point, as fast frequency switching is quite fundamentally at odds * with transition notifiers. Thus if successful, it will make registration of * transition notifiers fail going forward. */ void cpufreq_enable_fast_switch(struct cpufreq_policy *policy) { lockdep_assert_held(&policy->rwsem); if (!policy->fast_switch_possible) return; mutex_lock(&cpufreq_fast_switch_lock); if (cpufreq_fast_switch_count >= 0) { cpufreq_fast_switch_count++; policy->fast_switch_enabled = true; } else { pr_warn("CPU%u: Fast frequency switching not enabled\n", policy->cpu); cpufreq_list_transition_notifiers(); } mutex_unlock(&cpufreq_fast_switch_lock); } EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch); /** * cpufreq_disable_fast_switch - Disable fast frequency switching for policy. * @policy: cpufreq policy to disable fast frequency switching for. */ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) { mutex_lock(&cpufreq_fast_switch_lock); if (policy->fast_switch_enabled) { policy->fast_switch_enabled = false; if (!WARN_ON(cpufreq_fast_switch_count <= 0)) cpufreq_fast_switch_count--; } mutex_unlock(&cpufreq_fast_switch_lock); } EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); static unsigned int __resolve_freq(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int min, unsigned int max, unsigned int relation) { unsigned int idx; target_freq = clamp_val(target_freq, min, max); if (!policy->freq_table) return target_freq; idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation); policy->cached_resolved_idx = idx; policy->cached_target_freq = target_freq; return policy->freq_table[idx].frequency; } /** * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported * one. * @policy: associated policy to interrogate * @target_freq: target frequency to resolve. * * The target to driver frequency mapping is cached in the policy. * * Return: Lowest driver-supported frequency greater than or equal to the * given target_freq, subject to policy (min/max) and driver limitations. */ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsigned int target_freq) { unsigned int min = READ_ONCE(policy->min); unsigned int max = READ_ONCE(policy->max); /* * If this function runs in parallel with cpufreq_set_policy(), it may * read policy->min before the update and policy->max after the update * or the other way around, so there is no ordering guarantee. * * Resolve this by always honoring the max (in case it comes from * thermal throttling or similar). 
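* That is, if the min read here turns out to be above the max, clamp it down * to the max before resolving the frequency.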
*/ if (unlikely(min > max)) min = max; return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE); } EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) { unsigned int latency; if (policy->transition_delay_us) return policy->transition_delay_us; latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; if (latency) /* Give a 50% breathing room between updates */ return latency + (latency >> 1); return USEC_PER_MSEC; } EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); /********************************************************************* * SYSFS INTERFACE * *********************************************************************/ static ssize_t show_boost(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled); } static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { bool enable; if (kstrtobool(buf, &enable)) return -EINVAL; if (cpufreq_boost_trigger_state(enable)) { pr_err("%s: Cannot %s BOOST!\n", __func__, str_enable_disable(enable)); return -EINVAL; } pr_debug("%s: cpufreq BOOST %s\n", __func__, str_enabled_disabled(enable)); return count; } define_one_global_rw(boost); static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf) { return sysfs_emit(buf, "%d\n", policy->boost_enabled); } static int policy_set_boost(struct cpufreq_policy *policy, bool enable) { int ret; if (policy->boost_enabled == enable) return 0; policy->boost_enabled = enable; ret = cpufreq_driver->set_boost(policy, enable); if (ret) policy->boost_enabled = !policy->boost_enabled; return ret; } static ssize_t store_local_boost(struct cpufreq_policy *policy, const char *buf, size_t count) { int ret; bool enable; if (kstrtobool(buf, &enable)) return -EINVAL; if (!cpufreq_driver->boost_enabled) return -EINVAL; if (!policy->boost_supported) return -EINVAL; ret = policy_set_boost(policy, enable); if (!ret) return count; return ret; } static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost); static struct cpufreq_governor *find_governor(const char *str_governor) { struct cpufreq_governor *t; for_each_governor(t) if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN)) return t; return NULL; } static struct cpufreq_governor *get_governor(const char *str_governor) { struct cpufreq_governor *t; mutex_lock(&cpufreq_governor_mutex); t = find_governor(str_governor); if (!t) goto unlock; if (!try_module_get(t->owner)) t = NULL; unlock: mutex_unlock(&cpufreq_governor_mutex); return t; } static unsigned int cpufreq_parse_policy(char *str_governor) { if (!strncasecmp(str_governor, "performance", strlen("performance"))) return CPUFREQ_POLICY_PERFORMANCE; if (!strncasecmp(str_governor, "powersave", strlen("powersave"))) return CPUFREQ_POLICY_POWERSAVE; return CPUFREQ_POLICY_UNKNOWN; } /** * cpufreq_parse_governor - parse a governor string only for has_target() * @str_governor: Governor name. */ static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor) { struct cpufreq_governor *t; t = get_governor(str_governor); if (t) return t; if (request_module("cpufreq_%s", str_governor)) return NULL; return get_governor(str_governor); } /* * cpufreq_per_cpu_attr_read() / show_##file_name() - * print out cpufreq information * * Write out information from cpufreq_driver->policy[cpu]; object must be * "unsigned int". 
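* Each generated show_##file_name() handler emits the field as a single * decimal line via sysfs_emit().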
*/ #define show_one(file_name, object) \ static ssize_t show_##file_name \ (struct cpufreq_policy *policy, char *buf) \ { \ return sysfs_emit(buf, "%u\n", policy->object); \ } show_one(cpuinfo_min_freq, cpuinfo.min_freq); show_one(cpuinfo_max_freq, cpuinfo.max_freq); show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); __weak int arch_freq_get_on_cpu(int cpu) { return -EOPNOTSUPP; } static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy) { return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP; } static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) { ssize_t ret; int freq; freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ) ? arch_freq_get_on_cpu(policy->cpu) : 0; if (freq > 0) ret = sysfs_emit(buf, "%u\n", freq); else if (cpufreq_driver->setpolicy && cpufreq_driver->get) ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu)); else ret = sysfs_emit(buf, "%u\n", policy->cur); return ret; } /* * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access */ #define store_one(file_name, object) \ static ssize_t store_##file_name \ (struct cpufreq_policy *policy, const char *buf, size_t count) \ { \ unsigned long val; \ int ret; \ \ ret = kstrtoul(buf, 0, &val); \ if (ret) \ return ret; \ \ ret = freq_qos_update_request(policy->object##_freq_req, val);\ return ret >= 0 ? count : ret; \ } store_one(scaling_min_freq, min); store_one(scaling_max_freq, max); /* * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware */ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, char *buf) { unsigned int cur_freq = __cpufreq_get(policy); if (cur_freq) return sysfs_emit(buf, "%u\n", cur_freq); return sysfs_emit(buf, "<unknown>\n"); } /* * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware */ static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy, char *buf) { int avg_freq = arch_freq_get_on_cpu(policy->cpu); if (avg_freq > 0) return sysfs_emit(buf, "%u\n", avg_freq); return avg_freq != 0 ? avg_freq : -EINVAL; } /* * show_scaling_governor - show the current policy for the specified CPU */ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) { if (policy->policy == CPUFREQ_POLICY_POWERSAVE) return sysfs_emit(buf, "powersave\n"); else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) return sysfs_emit(buf, "performance\n"); else if (policy->governor) return sysfs_emit(buf, "%s\n", policy->governor->name); return -EINVAL; } /* * store_scaling_governor - store policy for the specified CPU */ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, const char *buf, size_t count) { char str_governor[CPUFREQ_NAME_LEN]; int ret; ret = sscanf(buf, "%15s", str_governor); if (ret != 1) return -EINVAL; if (cpufreq_driver->setpolicy) { unsigned int new_pol; new_pol = cpufreq_parse_policy(str_governor); if (!new_pol) return -EINVAL; ret = cpufreq_set_policy(policy, NULL, new_pol); } else { struct cpufreq_governor *new_gov; new_gov = cpufreq_parse_governor(str_governor); if (!new_gov) return -EINVAL; ret = cpufreq_set_policy(policy, new_gov, CPUFREQ_POLICY_UNKNOWN); module_put(new_gov->owner); } return ret ? 
ret : count; } /* * show_scaling_driver - show the cpufreq driver currently loaded */ static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) { return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); } /* * show_scaling_available_governors - show the available CPUfreq governors */ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, char *buf) { ssize_t i = 0; struct cpufreq_governor *t; if (!has_target()) { i += sysfs_emit(buf, "performance powersave"); goto out; } mutex_lock(&cpufreq_governor_mutex); for_each_governor(t) { if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) - (CPUFREQ_NAME_LEN + 2))) break; i += sysfs_emit_at(buf, i, "%s ", t->name); } mutex_unlock(&cpufreq_governor_mutex); out: i += sysfs_emit_at(buf, i, "\n"); return i; } ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf) { ssize_t i = 0; unsigned int cpu; for_each_cpu(cpu, mask) { i += sysfs_emit_at(buf, i, "%u ", cpu); if (i >= (PAGE_SIZE - 5)) break; } /* Remove the extra space at the end */ i--; i += sysfs_emit_at(buf, i, "\n"); return i; } EXPORT_SYMBOL_GPL(cpufreq_show_cpus); /* * show_related_cpus - show the CPUs affected by each transition even if * hw coordination is in use */ static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) { return cpufreq_show_cpus(policy->related_cpus, buf); } /* * show_affected_cpus - show the CPUs affected by each transition */ static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) { return cpufreq_show_cpus(policy->cpus, buf); } static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, const char *buf, size_t count) { unsigned int freq = 0; int ret; if (!policy->governor || !policy->governor->store_setspeed) return -EINVAL; ret = kstrtouint(buf, 0, &freq); if (ret) return ret; policy->governor->store_setspeed(policy, freq); return count; } static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) { if (!policy->governor || !policy->governor->show_setspeed) return sysfs_emit(buf, "<unsupported>\n"); return policy->governor->show_setspeed(policy, buf); } /* * show_bios_limit - show the current cpufreq HW/BIOS limitation */ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) { unsigned int limit; int ret; ret = cpufreq_driver->bios_limit(policy->cpu, &limit); if (!ret) return sysfs_emit(buf, "%u\n", limit); return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq); } cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); cpufreq_freq_attr_ro(cpuinfo_avg_freq); cpufreq_freq_attr_ro(cpuinfo_min_freq); cpufreq_freq_attr_ro(cpuinfo_max_freq); cpufreq_freq_attr_ro(cpuinfo_transition_latency); cpufreq_freq_attr_ro(scaling_available_governors); cpufreq_freq_attr_ro(scaling_driver); cpufreq_freq_attr_ro(scaling_cur_freq); cpufreq_freq_attr_ro(bios_limit); cpufreq_freq_attr_ro(related_cpus); cpufreq_freq_attr_ro(affected_cpus); cpufreq_freq_attr_rw(scaling_min_freq); cpufreq_freq_attr_rw(scaling_max_freq); cpufreq_freq_attr_rw(scaling_governor); cpufreq_freq_attr_rw(scaling_setspeed); static struct attribute *cpufreq_attrs[] = { &cpuinfo_min_freq.attr, &cpuinfo_max_freq.attr, &cpuinfo_transition_latency.attr, &scaling_cur_freq.attr, &scaling_min_freq.attr, &scaling_max_freq.attr, &affected_cpus.attr, &related_cpus.attr, &scaling_governor.attr, &scaling_driver.attr, &scaling_available_governors.attr, &scaling_setspeed.attr, NULL }; ATTRIBUTE_GROUPS(cpufreq); #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) #define 
to_attr(a) container_of(a, struct freq_attr, attr) static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); if (!fattr->show) return -EIO; guard(cpufreq_policy_read)(policy); if (likely(!policy_is_inactive(policy))) return fattr->show(policy, buf); return -EBUSY; } static ssize_t store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct cpufreq_policy *policy = to_policy(kobj); struct freq_attr *fattr = to_attr(attr); if (!fattr->store) return -EIO; guard(cpufreq_policy_write)(policy); if (likely(!policy_is_inactive(policy))) return fattr->store(policy, buf, count); return -EBUSY; } static void cpufreq_sysfs_release(struct kobject *kobj) { struct cpufreq_policy *policy = to_policy(kobj); pr_debug("last reference is dropped\n"); complete(&policy->kobj_unregister); } static const struct sysfs_ops sysfs_ops = { .show = show, .store = store, }; static const struct kobj_type ktype_cpufreq = { .sysfs_ops = &sysfs_ops, .default_groups = cpufreq_groups, .release = cpufreq_sysfs_release, }; static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu, struct device *dev) { if (unlikely(!dev)) return; if (cpumask_test_and_set_cpu(cpu, policy->real_cpus)) return; dev_dbg(dev, "%s: Adding symlink\n", __func__); if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq")) dev_err(dev, "cpufreq symlink creation failed\n"); } static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu, struct device *dev) { dev_dbg(dev, "%s: Removing symlink\n", __func__); sysfs_remove_link(&dev->kobj, "cpufreq"); cpumask_clear_cpu(cpu, policy->real_cpus); } static int cpufreq_add_dev_interface(struct cpufreq_policy *policy) { struct freq_attr **drv_attr; int ret = 0; /* Attributes that need freq_table */ if (policy->freq_table) { ret = sysfs_create_file(&policy->kobj, &cpufreq_freq_attr_scaling_available_freqs.attr); if (ret) return ret; if (cpufreq_boost_supported()) { ret = sysfs_create_file(&policy->kobj, &cpufreq_freq_attr_scaling_boost_freqs.attr); if (ret) return ret; } } /* set up files for this cpu device */ drv_attr = cpufreq_driver->attr; while (drv_attr && *drv_attr) { ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); if (ret) return ret; drv_attr++; } if (cpufreq_driver->get) { ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); if (ret) return ret; } if (cpufreq_avg_freq_supported(policy)) { ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr); if (ret) return ret; } if (cpufreq_driver->bios_limit) { ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); if (ret) return ret; } if (cpufreq_boost_supported()) { ret = sysfs_create_file(&policy->kobj, &local_boost.attr); if (ret) return ret; } return 0; } static int cpufreq_init_policy(struct cpufreq_policy *policy) { struct cpufreq_governor *gov = NULL; unsigned int pol = CPUFREQ_POLICY_UNKNOWN; int ret; if (has_target()) { /* Update policy governor to the one used before hotplug. */ if (policy->last_governor[0] != '\0') gov = get_governor(policy->last_governor); if (gov) { pr_debug("Restoring governor %s for cpu %d\n", gov->name, policy->cpu); } else { gov = get_governor(default_governor); } if (!gov) { gov = cpufreq_default_governor(); __module_get(gov->owner); } } else { /* Use the default policy if there is no last_policy. 
*/ if (policy->last_policy) { pol = policy->last_policy; } else { pol = cpufreq_parse_policy(default_governor); /* * In case the default governor is neither "performance" * nor "powersave", fall back to the initial policy * value set by the driver. */ if (pol == CPUFREQ_POLICY_UNKNOWN) pol = policy->policy; } if (pol != CPUFREQ_POLICY_PERFORMANCE && pol != CPUFREQ_POLICY_POWERSAVE) return -ENODATA; } ret = cpufreq_set_policy(policy, gov, pol); if (gov) module_put(gov->owner); return ret; } static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) { int ret = 0; /* Has this CPU been taken care of already? */ if (cpumask_test_cpu(cpu, policy->cpus)) return 0; guard(cpufreq_policy_write)(policy); if (has_target()) cpufreq_stop_governor(policy); cpumask_set_cpu(cpu, policy->cpus); if (has_target()) { ret = cpufreq_start_governor(policy); if (ret) pr_err("%s: Failed to start governor\n", __func__); } return ret; } void refresh_frequency_limits(struct cpufreq_policy *policy) { if (!policy_is_inactive(policy)) { pr_debug("updating policy for CPU %u\n", policy->cpu); cpufreq_set_policy(policy, policy->governor, policy->policy); } } EXPORT_SYMBOL(refresh_frequency_limits); static void handle_update(struct work_struct *work) { struct cpufreq_policy *policy = container_of(work, struct cpufreq_policy, update); pr_debug("handle_update for cpu %u called\n", policy->cpu); guard(cpufreq_policy_write)(policy); refresh_frequency_limits(policy); } static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq, void *data) { struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min); schedule_work(&policy->update); return 0; } static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq, void *data) { struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max); schedule_work(&policy->update); return 0; } static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) { struct kobject *kobj; struct completion *cmp; scoped_guard(cpufreq_policy_write, policy) { cpufreq_stats_free_table(policy); kobj = &policy->kobj; cmp = &policy->kobj_unregister; } kobject_put(kobj); /* * We need to make sure that the underlying kobj is * actually not referenced anymore by anybody before we * proceed with unloading. */ pr_debug("waiting for dropping of refcount\n"); wait_for_completion(cmp); pr_debug("wait complete\n"); } static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu) { struct cpufreq_policy *policy; struct device *dev = get_cpu_device(cpu); int ret; if (!dev) return NULL; policy = kzalloc(sizeof(*policy), GFP_KERNEL); if (!policy) return NULL; if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) goto err_free_policy; if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) goto err_free_cpumask; if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) goto err_free_rcpumask; init_completion(&policy->kobj_unregister); ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, cpufreq_global_kobject, "policy%u", cpu); if (ret) { dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret); /* * The entire policy object will be freed below, but the extra * memory allocated for the kobject name needs to be freed by * releasing the kobject. 
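		 * (That is, once kobject_init_and_add() has been called, the
		 * kobject must be freed with kobject_put() rather than a bare
		 * kfree(), so that the kobject core can release the name
		 * string it duplicated internally.)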
*/ kobject_put(&policy->kobj); goto err_free_real_cpus; } init_rwsem(&policy->rwsem); freq_constraints_init(&policy->constraints); policy->nb_min.notifier_call = cpufreq_notifier_min; policy->nb_max.notifier_call = cpufreq_notifier_max; ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN, &policy->nb_min); if (ret) { dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n", ret, cpu); goto err_kobj_remove; } ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX, &policy->nb_max); if (ret) { dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n", ret, cpu); goto err_min_qos_notifier; } INIT_LIST_HEAD(&policy->policy_list); spin_lock_init(&policy->transition_lock); init_waitqueue_head(&policy->transition_wait); INIT_WORK(&policy->update, handle_update); return policy; err_min_qos_notifier: freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN, &policy->nb_min); err_kobj_remove: cpufreq_policy_put_kobj(policy); err_free_real_cpus: free_cpumask_var(policy->real_cpus); err_free_rcpumask: free_cpumask_var(policy->related_cpus); err_free_cpumask: free_cpumask_var(policy->cpus); err_free_policy: kfree(policy); return NULL; } static void cpufreq_policy_free(struct cpufreq_policy *policy) { unsigned long flags; int cpu; /* * The callers must ensure the policy is inactive by now, to avoid any * races with show()/store() callbacks. */ if (unlikely(!policy_is_inactive(policy))) pr_warn("%s: Freeing active policy\n", __func__); /* Remove policy from list */ write_lock_irqsave(&cpufreq_driver_lock, flags); list_del(&policy->policy_list); for_each_cpu(cpu, policy->related_cpus) per_cpu(cpufreq_cpu_data, cpu) = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX, &policy->nb_max); freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN, &policy->nb_min); /* Cancel any pending policy->update work before freeing the policy. */ cancel_work_sync(&policy->update); if (policy->max_freq_req) { /* * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY * notification, since CPUFREQ_CREATE_POLICY notification was * sent after adding max_freq_req earlier. */ blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_REMOVE_POLICY, policy); freq_qos_remove_request(policy->max_freq_req); } freq_qos_remove_request(policy->min_freq_req); kfree(policy->min_freq_req); cpufreq_policy_put_kobj(policy); free_cpumask_var(policy->real_cpus); free_cpumask_var(policy->related_cpus); free_cpumask_var(policy->cpus); kfree(policy); } static int cpufreq_policy_online(struct cpufreq_policy *policy, unsigned int cpu, bool new_policy) { unsigned long flags; unsigned int j; int ret; guard(cpufreq_policy_write)(policy); policy->cpu = cpu; policy->governor = NULL; if (!new_policy && cpufreq_driver->online) { /* Recover policy->cpus using related_cpus */ cpumask_copy(policy->cpus, policy->related_cpus); ret = cpufreq_driver->online(policy); if (ret) { pr_debug("%s: %d: initialization failed\n", __func__, __LINE__); goto out_exit_policy; } } else { cpumask_copy(policy->cpus, cpumask_of(cpu)); /* * Call driver. From then on the cpufreq must be able * to accept all calls to ->verify and ->setpolicy for this CPU. */ ret = cpufreq_driver->init(policy); if (ret) { pr_debug("%s: %d: initialization failed\n", __func__, __LINE__); goto out_clear_policy; } /* * The initialization has succeeded and the policy is online. * If there is a problem with its frequency table, take it * offline and drop it. 
		 */
		if (policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_ASCENDING &&
		    policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_DESCENDING) {
			ret = cpufreq_table_validate_and_sort(policy);
			if (ret)
				goto out_offline_policy;
		}

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	/*
	 * The affected CPUs must always be the online ones; we aren't
	 * managing offline CPUs here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req) {
			ret = -ENOMEM;
			goto out_destroy_policy;
		}

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   FREQ_QOS_MIN_DEFAULT_VALUE);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on an uninitialized request in
		 * case of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_CREATE_POLICY, policy);
	} else {
		ret = freq_qos_update_request(policy->max_freq_req, policy->max);
		if (ret < 0)
			goto out_destroy_policy;
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			ret = -EIO;
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration of time, so it's better to set it to a frequency
	 * which is specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at an unknown frequency? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that the system will remain stable at an
			 * "unknown" frequency for a longer duration. Hence,
			 * a BUG_ON().
*/ BUG_ON(ret); pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n", __func__, policy->cpu, old_freq, policy->cur); } } if (new_policy) { ret = cpufreq_add_dev_interface(policy); if (ret) goto out_destroy_policy; cpufreq_stats_create_table(policy); write_lock_irqsave(&cpufreq_driver_lock, flags); list_add(&policy->policy_list, &cpufreq_policy_list); write_unlock_irqrestore(&cpufreq_driver_lock, flags); /* * Register with the energy model before * em_rebuild_sched_domains() is called, which will result * in rebuilding of the sched domains, which should only be done * once the energy model is properly initialized for the policy * first. * * Also, this should be called before the policy is registered * with cooling framework. */ if (cpufreq_driver->register_em) cpufreq_driver->register_em(policy); } ret = cpufreq_init_policy(policy); if (ret) { pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", __func__, cpu, ret); goto out_destroy_policy; } return 0; out_destroy_policy: for_each_cpu(j, policy->real_cpus) remove_cpu_dev_symlink(policy, j, get_cpu_device(j)); out_offline_policy: if (cpufreq_driver->offline) cpufreq_driver->offline(policy); out_exit_policy: if (cpufreq_driver->exit) cpufreq_driver->exit(policy); out_clear_policy: cpumask_clear(policy->cpus); return ret; } static int cpufreq_online(unsigned int cpu) { struct cpufreq_policy *policy; bool new_policy; int ret; pr_debug("%s: bringing CPU%u online\n", __func__, cpu); /* Check if this CPU already has a policy to manage it */ policy = per_cpu(cpufreq_cpu_data, cpu); if (policy) { WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus)); if (!policy_is_inactive(policy)) return cpufreq_add_policy_cpu(policy, cpu); /* This is the only online CPU for the policy. Start over. */ new_policy = false; } else { new_policy = true; policy = cpufreq_policy_alloc(cpu); if (!policy) return -ENOMEM; } ret = cpufreq_policy_online(policy, cpu, new_policy); if (ret) { cpufreq_policy_free(policy); return ret; } kobject_uevent(&policy->kobj, KOBJ_ADD); /* Callback for handling stuff after policy is ready */ if (cpufreq_driver->ready) cpufreq_driver->ready(policy); /* Register cpufreq cooling only for a new policy */ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver)) policy->cdev = of_cpufreq_cooling_register(policy); /* * Let the per-policy boost flag mirror the cpufreq_driver boost during * initialization for a new policy. For an existing policy, maintain the * previous boost value unless global boost is disabled. */ if (cpufreq_driver->set_boost && policy->boost_supported && (new_policy || !cpufreq_boost_enabled())) { ret = policy_set_boost(policy, cpufreq_boost_enabled()); if (ret) { /* If the set_boost fails, the online operation is not affected */ pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu, str_enable_disable(cpufreq_boost_enabled())); } } pr_debug("initialization complete\n"); return 0; } /** * cpufreq_add_dev - the cpufreq interface for a CPU device. * @dev: CPU device. 
* @sif: Subsystem interface structure pointer (not used) */ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) { struct cpufreq_policy *policy; unsigned cpu = dev->id; int ret; dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu); if (cpu_online(cpu)) { ret = cpufreq_online(cpu); if (ret) return ret; } /* Create sysfs link on CPU registration */ policy = per_cpu(cpufreq_cpu_data, cpu); if (policy) add_cpu_dev_symlink(policy, cpu, dev); return 0; } static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy) { int ret; if (has_target()) cpufreq_stop_governor(policy); cpumask_clear_cpu(cpu, policy->cpus); if (!policy_is_inactive(policy)) { /* Nominate a new CPU if necessary. */ if (cpu == policy->cpu) policy->cpu = cpumask_any(policy->cpus); /* Start the governor again for the active policy. */ if (has_target()) { ret = cpufreq_start_governor(policy); if (ret) pr_err("%s: Failed to start governor\n", __func__); } return; } if (has_target()) { strscpy(policy->last_governor, policy->governor->name, CPUFREQ_NAME_LEN); cpufreq_exit_governor(policy); } else { policy->last_policy = policy->policy; } /* * Perform the ->offline() during light-weight tear-down, as * that allows fast recovery when the CPU comes back. */ if (cpufreq_driver->offline) { cpufreq_driver->offline(policy); return; } if (cpufreq_driver->exit) cpufreq_driver->exit(policy); policy->freq_table = NULL; } static int cpufreq_offline(unsigned int cpu) { struct cpufreq_policy *policy; pr_debug("%s: unregistering CPU %u\n", __func__, cpu); policy = cpufreq_cpu_get_raw(cpu); if (!policy) { pr_debug("%s: No cpu_data found\n", __func__); return 0; } guard(cpufreq_policy_write)(policy); __cpufreq_offline(cpu, policy); return 0; } /* * cpufreq_remove_dev - remove a CPU device * * Removes the cpufreq interface for a CPU device. */ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); if (!policy) return; scoped_guard(cpufreq_policy_write, policy) { if (cpu_online(cpu)) __cpufreq_offline(cpu, policy); remove_cpu_dev_symlink(policy, cpu, dev); if (!cpumask_empty(policy->real_cpus)) return; /* * Unregister cpufreq cooling once all the CPUs of the policy * are removed. */ if (cpufreq_thermal_control_enabled(cpufreq_driver)) { cpufreq_cooling_unregister(policy->cdev); policy->cdev = NULL; } /* We did light-weight exit earlier, do full tear down now */ if (cpufreq_driver->offline && cpufreq_driver->exit) cpufreq_driver->exit(policy); } cpufreq_policy_free(policy); } /** * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference. * @policy: Policy managing CPUs. * @new_freq: New CPU frequency. * * Adjust to the current frequency first and clean up later by either calling * cpufreq_update_policy(), or scheduling handle_update(). 
*/ static void cpufreq_out_of_sync(struct cpufreq_policy *policy, unsigned int new_freq) { struct cpufreq_freqs freqs; pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n", policy->cur, new_freq); freqs.old = policy->cur; freqs.new = new_freq; cpufreq_freq_transition_begin(policy, &freqs); cpufreq_freq_transition_end(policy, &freqs, 0); } static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update) { unsigned int new_freq; if (!cpufreq_driver->get) return 0; new_freq = cpufreq_driver->get(policy->cpu); if (!new_freq) return 0; /* * If fast frequency switching is used with the given policy, the check * against policy->cur is pointless, so skip it in that case. */ if (policy->fast_switch_enabled || !has_target()) return new_freq; if (policy->cur != new_freq) { /* * For some platforms, the frequency returned by hardware may be * slightly different from what is provided in the frequency * table, for example hardware may return 499 MHz instead of 500 * MHz. In such cases it is better to avoid getting into * unnecessary frequency updates. */ if (abs(policy->cur - new_freq) < KHZ_PER_MHZ) return policy->cur; cpufreq_out_of_sync(policy, new_freq); if (update) schedule_work(&policy->update); } return new_freq; } /** * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur * @cpu: CPU number * * This is the last known freq, without actually getting it from the driver. * Return value will be same as what is shown in scaling_cur_freq in sysfs. */ unsigned int cpufreq_quick_get(unsigned int cpu) { unsigned long flags; read_lock_irqsave(&cpufreq_driver_lock, flags); if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) { unsigned int ret_freq = cpufreq_driver->get(cpu); read_unlock_irqrestore(&cpufreq_driver_lock, flags); return ret_freq; } read_unlock_irqrestore(&cpufreq_driver_lock, flags); struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (policy) return policy->cur; return 0; } EXPORT_SYMBOL(cpufreq_quick_get); /** * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU * @cpu: CPU number * * Just return the max possible frequency for a given CPU. */ unsigned int cpufreq_quick_get_max(unsigned int cpu) { struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (policy) return policy->max; return 0; } EXPORT_SYMBOL(cpufreq_quick_get_max); /** * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU * @cpu: CPU number * * The default return value is the max_freq field of cpuinfo. 
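 *
 * Illustrative sketch (not from this file): an architecture that can read a
 * hardware boost ceiling could override this __weak stub along the lines of
 *
 *	unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
 *	{
 *		return arch_read_boost_limit_khz(cpu);
 *	}
 *
 * where arch_read_boost_limit_khz() is a hypothetical helper standing in for
 * whatever register or firmware interface the platform provides.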
*/ __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu) { struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (policy) return policy->cpuinfo.max_freq; return 0; } EXPORT_SYMBOL(cpufreq_get_hw_max_freq); static unsigned int __cpufreq_get(struct cpufreq_policy *policy) { if (unlikely(policy_is_inactive(policy))) return 0; return cpufreq_verify_current_freq(policy, true); } /** * cpufreq_get - get the current CPU frequency (in kHz) * @cpu: CPU number * * Get the CPU current (static) CPU frequency */ unsigned int cpufreq_get(unsigned int cpu) { struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (!policy) return 0; guard(cpufreq_policy_read)(policy); return __cpufreq_get(policy); } EXPORT_SYMBOL(cpufreq_get); static struct subsys_interface cpufreq_interface = { .name = "cpufreq", .subsys = &cpu_subsys, .add_dev = cpufreq_add_dev, .remove_dev = cpufreq_remove_dev, }; /* * In case platform wants some specific frequency to be configured * during suspend.. */ int cpufreq_generic_suspend(struct cpufreq_policy *policy) { int ret; if (!policy->suspend_freq) { pr_debug("%s: suspend_freq not defined\n", __func__); return 0; } pr_debug("%s: Setting suspend-freq: %u\n", __func__, policy->suspend_freq); ret = __cpufreq_driver_target(policy, policy->suspend_freq, CPUFREQ_RELATION_H); if (ret) pr_err("%s: unable to set suspend-freq: %u. err: %d\n", __func__, policy->suspend_freq, ret); return ret; } EXPORT_SYMBOL(cpufreq_generic_suspend); /** * cpufreq_suspend() - Suspend CPUFreq governors. * * Called during system wide Suspend/Hibernate cycles for suspending governors * as some platforms can't change frequency after this point in suspend cycle. * Because some of the devices (like: i2c, regulators, etc) they use for * changing frequency are suspended quickly after this point. */ void cpufreq_suspend(void) { struct cpufreq_policy *policy; if (!cpufreq_driver) return; if (!has_target() && !cpufreq_driver->suspend) goto suspend; pr_debug("%s: Suspending Governors\n", __func__); for_each_active_policy(policy) { if (has_target()) { scoped_guard(cpufreq_policy_write, policy) { cpufreq_stop_governor(policy); } } if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) pr_err("%s: Failed to suspend driver: %s\n", __func__, cpufreq_driver->name); } suspend: cpufreq_suspended = true; } /** * cpufreq_resume() - Resume CPUFreq governors. * * Called during system wide Suspend/Hibernate cycle for resuming governors that * are suspended with cpufreq_suspend(). */ void cpufreq_resume(void) { struct cpufreq_policy *policy; int ret; if (!cpufreq_driver) return; if (unlikely(!cpufreq_suspended)) return; cpufreq_suspended = false; if (!has_target() && !cpufreq_driver->resume) return; pr_debug("%s: Resuming Governors\n", __func__); for_each_active_policy(policy) { if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { pr_err("%s: Failed to resume driver: %s\n", __func__, cpufreq_driver->name); } else if (has_target()) { scoped_guard(cpufreq_policy_write, policy) { ret = cpufreq_start_governor(policy); } if (ret) pr_err("%s: Failed to start governor for CPU%u's policy\n", __func__, policy->cpu); } } } /** * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones. * @flags: Flags to test against the current cpufreq driver's flags. * * Assumes that the driver is there, so callers must ensure that this is the * case. 
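 *
 * Example (illustrative only): a governor that must poke the driver even for
 * an unchanged target frequency could gate that behavior with
 *
 *	if (cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
 *		force_update = true;
 *
 * CPUFREQ_NEED_UPDATE_LIMITS is a real driver flag; the surrounding variable
 * is made up for the sketch.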
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently registered cpufreq driver or NULL if
 * none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - Return current driver data.
 *
 * Return the private data of the currently registered cpufreq driver, or NULL
 * if no cpufreq driver has been registered.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list of notifiers that run on
 * clock rate changes (once before and once after every transition), or a list
 * of notifiers that run on cpufreq policy changes.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_register().
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_unregister().
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
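 *
 * Illustrative call-site sketch (schedutil-like, hypothetical names):
 *
 *	if (policy->fast_switch_enabled) {
 *		freq = cpufreq_driver_fast_switch(policy, next_freq);
 *		if (!freq)
 *			return;
 *	}
 *
 * A zero return means the switch failed and the hardware configuration was
 * left unchanged, so the caller simply backs off.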
* * The driver's ->fast_switch() callback invoked by this function must be * suitable for being called from within RCU-sched read-side critical sections * and it is expected to select the minimum available frequency greater than or * equal to @target_freq (CPUFREQ_RELATION_L). * * This function must not be called if policy->fast_switch_enabled is unset. * * Governors calling this function must guarantee that it will never be invoked * twice in parallel for the same policy and that it will never be called in * parallel with either ->target() or ->target_index() for the same policy. * * Returns the actual frequency set for the CPU. * * If 0 is returned by the driver's ->fast_switch() callback to indicate an * error condition, the hardware configuration must be preserved. */ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { unsigned int freq; int cpu; target_freq = clamp_val(target_freq, policy->min, policy->max); freq = cpufreq_driver->fast_switch(policy, target_freq); if (!freq) return 0; policy->cur = freq; arch_set_freq_scale(policy->related_cpus, freq, arch_scale_freq_ref(policy->cpu)); cpufreq_stats_record_transition(policy, freq); if (trace_cpu_frequency_enabled()) { for_each_cpu(cpu, policy->cpus) trace_cpu_frequency(freq, cpu); } return freq; } EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch); /** * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go. * @cpu: Target CPU. * @min_perf: Minimum (required) performance level (units of @capacity). * @target_perf: Target (desired) performance level (units of @capacity). * @capacity: Capacity of the target CPU. * * Carry out a fast performance level switch of @cpu without sleeping. * * The driver's ->adjust_perf() callback invoked by this function must be * suitable for being called from within RCU-sched read-side critical sections * and it is expected to select a suitable performance level equal to or above * @min_perf and preferably equal to or below @target_perf. * * This function must not be called if policy->fast_switch_enabled is unset. * * Governors calling this function must guarantee that it will never be invoked * twice in parallel for the same CPU and that it will never be called in * parallel with either ->target() or ->target_index() or ->fast_switch() for * the same CPU. */ void cpufreq_driver_adjust_perf(unsigned int cpu, unsigned long min_perf, unsigned long target_perf, unsigned long capacity) { cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity); } /** * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback. * * Return 'true' if the ->adjust_perf callback is present for the * current driver or 'false' otherwise. 
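 *
 * Example (sketch): a governor can choose between the two fast paths once,
 * e.g.:
 *
 *	if (cpufreq_driver_has_adjust_perf())
 *		cpufreq_driver_adjust_perf(cpu, min_perf, target_perf, capacity);
 *	else
 *		cpufreq_driver_fast_switch(policy, target_freq);
 *
 * with min_perf/target_perf/capacity supplied by the scheduler-side caller.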
*/ bool cpufreq_driver_has_adjust_perf(void) { return !!cpufreq_driver->adjust_perf; } /* Must set freqs->new to intermediate frequency */ static int __target_intermediate(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs, int index) { int ret; freqs->new = cpufreq_driver->get_intermediate(policy, index); /* We don't need to switch to intermediate freq */ if (!freqs->new) return 0; pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n", __func__, policy->cpu, freqs->old, freqs->new); cpufreq_freq_transition_begin(policy, freqs); ret = cpufreq_driver->target_intermediate(policy, index); cpufreq_freq_transition_end(policy, freqs, ret); if (ret) pr_err("%s: Failed to change to intermediate frequency: %d\n", __func__, ret); return ret; } static int __target_index(struct cpufreq_policy *policy, int index) { struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; unsigned int restore_freq, intermediate_freq = 0; unsigned int newfreq = policy->freq_table[index].frequency; int retval = -EINVAL; bool notify; if (newfreq == policy->cur) return 0; /* Save last value to restore later on errors */ restore_freq = policy->cur; notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION); if (notify) { /* Handle switching to intermediate frequency */ if (cpufreq_driver->get_intermediate) { retval = __target_intermediate(policy, &freqs, index); if (retval) return retval; intermediate_freq = freqs.new; /* Set old freq to intermediate */ if (intermediate_freq) freqs.old = freqs.new; } freqs.new = newfreq; pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n", __func__, policy->cpu, freqs.old, freqs.new); cpufreq_freq_transition_begin(policy, &freqs); } retval = cpufreq_driver->target_index(policy, index); if (retval) pr_err("%s: Failed to change cpu frequency: %d\n", __func__, retval); if (notify) { cpufreq_freq_transition_end(policy, &freqs, retval); /* * Failed after setting to intermediate freq? Driver should have * reverted back to initial frequency and so should we. Check * here for intermediate_freq instead of get_intermediate, in * case we haven't switched to intermediate freq at all. */ if (unlikely(retval && intermediate_freq)) { freqs.old = intermediate_freq; freqs.new = restore_freq; cpufreq_freq_transition_begin(policy, &freqs); cpufreq_freq_transition_end(policy, &freqs, 0); } } return retval; } int __cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int old_target_freq = target_freq; if (cpufreq_disabled()) return -ENODEV; target_freq = __resolve_freq(policy, target_freq, policy->min, policy->max, relation); pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", policy->cpu, target_freq, relation, old_target_freq); /* * This might look like a redundant call as we are checking it again * after finding index. But it is left intentionally for cases where * exactly same freq is called again and so we can save on few function * calls. */ if (target_freq == policy->cur && !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS)) return 0; if (cpufreq_driver->target) { /* * If the driver hasn't setup a single inefficient frequency, * it's unlikely it knows how to decode CPUFREQ_RELATION_E. 
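		 * (CPUFREQ_RELATION_E is a modifier bit OR'ed into the base
		 * relation, e.g. CPUFREQ_RELATION_LE == CPUFREQ_RELATION_L |
		 * CPUFREQ_RELATION_E, so clearing it here falls back to the
		 * plain L/H/C relation.)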
*/ if (!policy->efficiencies_available) relation &= ~CPUFREQ_RELATION_E; return cpufreq_driver->target(policy, target_freq, relation); } if (!cpufreq_driver->target_index) return -EINVAL; return __target_index(policy, policy->cached_resolved_idx); } EXPORT_SYMBOL_GPL(__cpufreq_driver_target); int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { guard(cpufreq_policy_write)(policy); return __cpufreq_driver_target(policy, target_freq, relation); } EXPORT_SYMBOL_GPL(cpufreq_driver_target); __weak struct cpufreq_governor *cpufreq_fallback_governor(void) { return NULL; } static int cpufreq_init_governor(struct cpufreq_policy *policy) { int ret; /* Don't start any governor operations if we are entering suspend */ if (cpufreq_suspended) return 0; /* * Governor might not be initiated here if ACPI _PPC changed * notification happened, so check it. */ if (!policy->governor) return -EINVAL; /* Platform doesn't want dynamic frequency switching ? */ if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING && cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) { struct cpufreq_governor *gov = cpufreq_fallback_governor(); if (gov) { pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n", policy->governor->name, gov->name); policy->governor = gov; } else { return -EINVAL; } } if (!try_module_get(policy->governor->owner)) return -EINVAL; pr_debug("%s: for CPU %u\n", __func__, policy->cpu); if (policy->governor->init) { ret = policy->governor->init(policy); if (ret) { module_put(policy->governor->owner); return ret; } } policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET); return 0; } static void cpufreq_exit_governor(struct cpufreq_policy *policy) { if (cpufreq_suspended || !policy->governor) return; pr_debug("%s: for CPU %u\n", __func__, policy->cpu); if (policy->governor->exit) policy->governor->exit(policy); module_put(policy->governor->owner); } int cpufreq_start_governor(struct cpufreq_policy *policy) { int ret; if (cpufreq_suspended) return 0; if (!policy->governor) return -EINVAL; pr_debug("%s: for CPU %u\n", __func__, policy->cpu); cpufreq_verify_current_freq(policy, false); if (policy->governor->start) { ret = policy->governor->start(policy); if (ret) return ret; } if (policy->governor->limits) policy->governor->limits(policy); return 0; } void cpufreq_stop_governor(struct cpufreq_policy *policy) { if (cpufreq_suspended || !policy->governor) return; pr_debug("%s: for CPU %u\n", __func__, policy->cpu); if (policy->governor->stop) policy->governor->stop(policy); } static void cpufreq_governor_limits(struct cpufreq_policy *policy) { if (cpufreq_suspended || !policy->governor) return; pr_debug("%s: for CPU %u\n", __func__, policy->cpu); if (policy->governor->limits) policy->governor->limits(policy); } int cpufreq_register_governor(struct cpufreq_governor *governor) { int err; if (!governor) return -EINVAL; if (cpufreq_disabled()) return -ENODEV; mutex_lock(&cpufreq_governor_mutex); err = -EBUSY; if (!find_governor(governor->name)) { err = 0; list_add(&governor->governor_list, &cpufreq_governor_list); } mutex_unlock(&cpufreq_governor_mutex); return err; } EXPORT_SYMBOL_GPL(cpufreq_register_governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor) { struct cpufreq_policy *policy; unsigned long flags; if (!governor) return; if (cpufreq_disabled()) return; /* clear last_governor for all inactive policies */ read_lock_irqsave(&cpufreq_driver_lock, flags); 
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			policy->last_governor[0] = '\0';
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);

/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

DEFINE_PER_CPU(unsigned long, cpufreq_pressure);

/**
 * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
 * @policy: cpufreq policy of the CPUs.
 *
 * Update the value of cpufreq pressure for all @cpus in the policy.
 */
static void cpufreq_update_pressure(struct cpufreq_policy *policy)
{
	unsigned long max_capacity, capped_freq, pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(policy->related_cpus);
	max_freq = arch_scale_freq_ref(cpu);
	capped_freq = policy->max;

	/*
	 * Handle boost frequencies properly: they should simply clear the
	 * cpufreq pressure value.
	 */
	if (max_freq <= capped_freq) {
		pressure = 0;
	} else {
		max_capacity = arch_scale_cpu_capacity(cpu);
		pressure = max_capacity -
			   mult_frac(max_capacity, capped_freq, max_freq);
	}

	for_each_cpu(cpu, policy->related_cpus)
		WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
}

/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present) or
 * carry out a governor update for @policy. That is, run the current governor's
 * ->limits() callback (if @new_gov points to the same object as the one in
 * @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * The PM QoS framework collects all the requests from users and
	 * provides us with the final aggregated value here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and make
	 * sure that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	/*
	 * Resolve policy min/max to available frequencies. This ensures that
	 * the resolved frequency will neither overshoot the requested maximum
	 * nor undershoot the requested minimum.
	 *
	 * Avoid storing intermediate values in policy->max or policy->min,
	 * and avoid compiler optimizations around them, because they may be
	 * accessed concurrently by cpufreq_driver_resolve_freq() during the
	 * update.
*/ WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, new_data.min, new_data.max, CPUFREQ_RELATION_H)); new_data.min = __resolve_freq(policy, new_data.min, new_data.min, new_data.max, CPUFREQ_RELATION_L); WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min); trace_cpu_frequency_limits(policy); cpufreq_update_pressure(policy); policy->cached_target_freq = UINT_MAX; pr_debug("new min and max freqs are %u - %u kHz\n", policy->min, policy->max); if (cpufreq_driver->setpolicy) { policy->policy = new_pol; pr_debug("setting range\n"); return cpufreq_driver->setpolicy(policy); } if (new_gov == policy->governor) { pr_debug("governor limits update\n"); cpufreq_governor_limits(policy); return 0; } pr_debug("governor switch\n"); /* save old, working values */ old_gov = policy->governor; /* end old governor */ if (old_gov) { cpufreq_stop_governor(policy); cpufreq_exit_governor(policy); } /* start new governor */ policy->governor = new_gov; ret = cpufreq_init_governor(policy); if (!ret) { ret = cpufreq_start_governor(policy); if (!ret) { pr_debug("governor change\n"); return 0; } cpufreq_exit_governor(policy); } /* new governor failed, so re-start old one */ pr_debug("starting governor %s failed\n", policy->governor->name); if (old_gov) { policy->governor = old_gov; if (cpufreq_init_governor(policy)) { policy->governor = NULL; } else if (cpufreq_start_governor(policy)) { cpufreq_exit_governor(policy); policy->governor = NULL; } } return ret; } static void cpufreq_policy_refresh(struct cpufreq_policy *policy) { guard(cpufreq_policy_write)(policy); /* * BIOS might change freq behind our back * -> ask driver for current freq and notify governors about a change */ if (cpufreq_driver->get && has_target() && (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false)))) return; refresh_frequency_limits(policy); } /** * cpufreq_update_policy - Re-evaluate an existing cpufreq policy. * @cpu: CPU to re-evaluate the policy for. * * Update the current frequency for the cpufreq policy of @cpu and use * cpufreq_set_policy() to re-apply the min and max limits, which triggers the * evaluation of policy notifiers and the cpufreq driver's ->verify() callback * for the policy in question, among other things. */ void cpufreq_update_policy(unsigned int cpu) { struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (!policy) return; cpufreq_policy_refresh(policy); } EXPORT_SYMBOL(cpufreq_update_policy); /** * cpufreq_update_limits - Update policy limits for a given CPU. * @cpu: CPU to update the policy limits for. * * Invoke the driver's ->update_limits callback if present or call * cpufreq_policy_refresh() for @cpu. 
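 *
 * Example (sketch): platform code that learns about a new hardware limit,
 * such as an ACPI _PPC change handler, would typically just call
 *
 *	cpufreq_update_limits(cpu);
 *
 * for each affected CPU; the exact caller shown here is illustrative.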
 */
void cpufreq_update_limits(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(policy);
	else
		cpufreq_policy_refresh(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);

/*********************************************************************
 *                               BOOST                               *
 *********************************************************************/

int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);

static int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	/*
	 * Don't compare 'cpufreq_driver->boost_enabled' with 'state' here to
	 * make sure all policies are in sync with global boost flag.
	 */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_lock();
	for_each_active_policy(policy) {
		if (!policy->boost_supported)
			continue;

		ret = policy_set_boost(policy, state);
		if (ret)
			goto err_reset_state;
	}
	cpus_read_unlock();

	return 0;

err_reset_state:
	cpus_read_unlock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, str_enable_disable(state));

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

bool cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values submitted by
 * the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structures; make sure they are available before proceeding further.
*/ if (!get_cpu_device(0)) return -EPROBE_DEFER; if (!driver_data || !driver_data->verify || !driver_data->init || (driver_data->target_index && driver_data->target) || (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) || (!driver_data->get_intermediate != !driver_data->target_intermediate) || (!driver_data->online != !driver_data->offline) || (driver_data->adjust_perf && !driver_data->fast_switch)) return -EINVAL; pr_debug("trying to register driver %s\n", driver_data->name); /* Protect against concurrent CPU online/offline. */ cpus_read_lock(); write_lock_irqsave(&cpufreq_driver_lock, flags); if (cpufreq_driver) { write_unlock_irqrestore(&cpufreq_driver_lock, flags); ret = -EEXIST; goto out; } cpufreq_driver = driver_data; write_unlock_irqrestore(&cpufreq_driver_lock, flags); if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; if (cpufreq_boost_supported()) { ret = create_boost_sysfs_file(); if (ret) goto err_null_driver; } /* * Mark support for the scheduler's frequency invariance engine for * drivers that implement target(), target_index() or fast_switch(). */ if (!cpufreq_driver->setpolicy) { static_branch_enable_cpuslocked(&cpufreq_freq_invariance); pr_debug("cpufreq: supports frequency invariance\n"); } ret = subsys_interface_register(&cpufreq_interface); if (ret) goto err_boost_unreg; if (unlikely(list_empty(&cpufreq_policy_list))) { /* if all ->init() calls failed, unregister */ ret = -ENODEV; pr_debug("%s: No CPU initialized for driver %s\n", __func__, driver_data->name); goto err_if_unreg; } ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "cpufreq:online", cpuhp_cpufreq_online, cpuhp_cpufreq_offline); if (ret < 0) goto err_if_unreg; hp_online = ret; ret = 0; pr_debug("driver %s up and running\n", driver_data->name); goto out; err_if_unreg: subsys_interface_unregister(&cpufreq_interface); err_boost_unreg: if (!cpufreq_driver->setpolicy) static_branch_disable_cpuslocked(&cpufreq_freq_invariance); remove_boost_sysfs_file(); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); out: cpus_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(cpufreq_register_driver); /* * cpufreq_unregister_driver - unregister the current CPUFreq driver * * Unregister the current CPUFreq driver. Only call this if you have * the right to do so, i.e. if you have succeeded in initialising before! * Returns zero if successful, and -EINVAL if the cpufreq_driver is * currently not initialised. 
*/ void cpufreq_unregister_driver(struct cpufreq_driver *driver) { unsigned long flags; if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver))) return; pr_debug("unregistering driver %s\n", driver->name); /* Protect against concurrent cpu hotplug */ cpus_read_lock(); subsys_interface_unregister(&cpufreq_interface); remove_boost_sysfs_file(); static_branch_disable_cpuslocked(&cpufreq_freq_invariance); cpuhp_remove_state_nocalls_cpuslocked(hp_online); write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; write_unlock_irqrestore(&cpufreq_driver_lock, flags); cpus_read_unlock(); } EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); static int __init cpufreq_core_init(void) { struct cpufreq_governor *gov = cpufreq_default_governor(); struct device *dev_root; if (cpufreq_disabled()) return -ENODEV; dev_root = bus_get_dev_root(&cpu_subsys); if (dev_root) { cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj); put_device(dev_root); } BUG_ON(!cpufreq_global_kobject); if (!strlen(default_governor)) strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN); return 0; } static bool cpufreq_policy_is_good_for_eas(unsigned int cpu) { struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu); if (!policy) { pr_debug("cpufreq policy not set for CPU: %d\n", cpu); return false; } return sugov_is_governor(policy); } bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask) { unsigned int cpu; /* Do not attempt EAS if schedutil is not being used. */ for_each_cpu(cpu, cpu_mask) { if (!cpufreq_policy_is_good_for_eas(cpu)) { pr_debug("rd %*pbl: schedutil is mandatory for EAS\n", cpumask_pr_args(cpu_mask)); return false; } } return true; } module_param(off, int, 0444); module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444); core_initcall(cpufreq_core_init);
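/*
 * Illustrative sketch (not part of cpufreq.c): the shape of a minimal
 * ->target_index style driver, as constrained by the rules checked in
 * cpufreq_register_driver() above (->init and ->verify are mandatory, and
 * ->target_index excludes ->target and ->setpolicy). All "sketch_*" names
 * are made up; a real driver would program hardware where indicated.
 */

#include <linux/cpufreq.h>
#include <linux/module.h>

static struct cpufreq_frequency_table sketch_freq_table[] = {
	{ .frequency = 500000 },	/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int sketch_cpu_init(struct cpufreq_policy *policy)
{
	/* Advertise the table; the core validates and sorts it. */
	policy->freq_table = sketch_freq_table;
	policy->cpuinfo.transition_latency = 100000;	/* ns */
	return 0;
}

static int sketch_target_index(struct cpufreq_policy *policy,
			       unsigned int index)
{
	/* Program the hardware for sketch_freq_table[index] here. */
	return 0;
}

static struct cpufreq_driver sketch_cpufreq_driver = {
	.name		= "sketch",
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= sketch_cpu_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= sketch_target_index,
};

static int __init sketch_cpufreq_init(void)
{
	/* Fails with -EEXIST if another cpufreq driver is already registered. */
	return cpufreq_register_driver(&sketch_cpufreq_driver);
}
module_init(sketch_cpufreq_init);
MODULE_LICENSE("GPL");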
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cgroup

#if !defined(_TRACE_CGROUP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_CGROUP_H

#include <linux/cgroup.h>
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(cgroup_root,

	TP_PROTO(struct cgroup_root *root),

	TP_ARGS(root),

	TP_STRUCT__entry(
		__field(	int,		root		)
		__field(	u16,		ss_mask		)
		__string(	name,		root->name	)
	),

	TP_fast_assign(
		__entry->root = root->hierarchy_id;
		__entry->ss_mask = root->subsys_mask;
		__assign_str(name);
	),

	TP_printk("root=%d ss_mask=%#x name=%s",
		  __entry->root, __entry->ss_mask, __get_str(name))
);

DEFINE_EVENT(cgroup_root, cgroup_setup_root,
	TP_PROTO(struct cgroup_root *root),
	TP_ARGS(root)
);

DEFINE_EVENT(cgroup_root, cgroup_destroy_root,
	TP_PROTO(struct cgroup_root *root),
	TP_ARGS(root)
);

DEFINE_EVENT(cgroup_root, cgroup_remount,
	TP_PROTO(struct cgroup_root *root),
	TP_ARGS(root)
);

DECLARE_EVENT_CLASS(cgroup,

	TP_PROTO(struct cgroup *cgrp, const char *path),

	TP_ARGS(cgrp, path),

	TP_STRUCT__entry(
		__field(	int,		root		)
		__field(	int,		level		)
		__field(	u64,		id		)
		__string(	path,		path		)
	),

	TP_fast_assign(
		__entry->root = cgrp->root->hierarchy_id;
		__entry->id = cgroup_id(cgrp);
		__entry->level = cgrp->level;
		__assign_str(path);
	),

	TP_printk("root=%d id=%llu level=%d path=%s",
		  __entry->root, __entry->id, __entry->level, __get_str(path))
);

DEFINE_EVENT(cgroup, cgroup_mkdir,
	TP_PROTO(struct cgroup *cgrp, const char *path),
	TP_ARGS(cgrp, path)
);

DEFINE_EVENT(cgroup, cgroup_rmdir,
	TP_PROTO(struct cgroup *cgrp, const char *path),
	TP_ARGS(cgrp, path)
);

DEFINE_EVENT(cgroup, cgroup_release,
	TP_PROTO(struct cgroup *cgrp, const char *path),
	TP_ARGS(cgrp, path)
);

DEFINE_EVENT(cgroup, cgroup_rename,
	TP_PROTO(struct cgroup *cgrp, const char *path),
	TP_ARGS(cgrp, path)
);

DEFINE_EVENT(cgroup, cgroup_freeze,
	TP_PROTO(struct cgroup *cgrp, const char *path),
	TP_ARGS(cgrp, path)
);

DEFINE_EVENT(cgroup, cgroup_unfreeze,
	TP_PROTO(struct cgroup *cgrp, const char *path),
	TP_ARGS(cgrp, path)
);

DECLARE_EVENT_CLASS(cgroup_migrate,

	TP_PROTO(struct cgroup *dst_cgrp, const char *path,
		 struct task_struct *task, bool threadgroup),

	TP_ARGS(dst_cgrp, path, task, threadgroup),

	TP_STRUCT__entry(
		__field(	int,		dst_root	)
		__field(	int,		dst_level	)
		__field(	u64,		dst_id		)
		__field(	int,		pid		)
		__string(	dst_path,	path		)
		__string(	comm,		task->comm	)
	),

	TP_fast_assign(
		__entry->dst_root = dst_cgrp->root->hierarchy_id;
		__entry->dst_id = cgroup_id(dst_cgrp);
		__entry->dst_level = dst_cgrp->level;
		__assign_str(dst_path);
		__entry->pid = task->pid;
		__assign_str(comm);
	),
TP_printk("dst_root=%d dst_id=%llu dst_level=%d dst_path=%s pid=%d comm=%s", __entry->dst_root, __entry->dst_id, __entry->dst_level, __get_str(dst_path), __entry->pid, __get_str(comm)) ); DEFINE_EVENT(cgroup_migrate, cgroup_attach_task, TP_PROTO(struct cgroup *dst_cgrp, const char *path, struct task_struct *task, bool threadgroup), TP_ARGS(dst_cgrp, path, task, threadgroup) ); DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks, TP_PROTO(struct cgroup *dst_cgrp, const char *path, struct task_struct *task, bool threadgroup), TP_ARGS(dst_cgrp, path, task, threadgroup) ); DECLARE_EVENT_CLASS(cgroup_event, TP_PROTO(struct cgroup *cgrp, const char *path, int val), TP_ARGS(cgrp, path, val), TP_STRUCT__entry( __field( int, root ) __field( int, level ) __field( u64, id ) __string( path, path ) __field( int, val ) ), TP_fast_assign( __entry->root = cgrp->root->hierarchy_id; __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; __assign_str(path); __entry->val = val; ), TP_printk("root=%d id=%llu level=%d path=%s val=%d", __entry->root, __entry->id, __entry->level, __get_str(path), __entry->val) ); DEFINE_EVENT(cgroup_event, cgroup_notify_populated, TP_PROTO(struct cgroup *cgrp, const char *path, int val), TP_ARGS(cgrp, path, val) ); DEFINE_EVENT(cgroup_event, cgroup_notify_frozen, TP_PROTO(struct cgroup *cgrp, const char *path, int val), TP_ARGS(cgrp, path, val) ); DECLARE_EVENT_CLASS(cgroup_rstat, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended), TP_STRUCT__entry( __field( int, root ) __field( int, level ) __field( u64, id ) __field( int, cpu ) __field( bool, contended ) ), TP_fast_assign( __entry->root = cgrp->root->hierarchy_id; __entry->id = cgroup_id(cgrp); __entry->level = cgrp->level; __entry->cpu = cpu; __entry->contended = contended; ), TP_printk("root=%d id=%llu level=%d cpu=%d lock contended:%d", __entry->root, __entry->id, __entry->level, __entry->cpu, __entry->contended) ); /* * Related to locks: * global rstat_base_lock for base stats * cgroup_subsys::rstat_ss_lock for subsystem stats */ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_lock_contended, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_locked, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock, TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), TP_ARGS(cgrp, cpu, contended) ); #endif /* _TRACE_CGROUP_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/indirect_call_wrapper.h>

#include <net/aligned_data.h>
#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/hotdata.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>
#include <net/psp.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/md5.h>

#include <trace/events/tcp.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
			      enum sk_rst_reason reason);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
const struct inet_connection_sock_af_ops ipv6_specific;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif

/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allows compiler optimizations.
* It is a specialized version of inet6_sk_generic(). */ #define tcp_inet6_sk(sk) (&container_of_const(tcp_sk(sk), \ struct tcp6_sock, tcp)->inet6) static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { rcu_assign_pointer(sk->sk_rx_dst, dst); sk->sk_rx_dst_ifindex = skb->skb_iif; sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst)); } } static u32 tcp_v6_init_seq(const struct sk_buff *skb) { return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32, tcp_hdr(skb)->dest, tcp_hdr(skb)->source); } static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb) { return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32); } static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { /* This check is replicated from tcp_v6_connect() and intended to * prevent BPF program called below from accessing bytes that are out * of the bound specified by user in addr_len. */ if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; sock_owned_by_me(sk); return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len); } static int tcp_v6_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct inet_connection_sock *icsk = inet_csk(sk); struct in6_addr *saddr = NULL, *final_p, final; struct inet_timewait_death_row *tcp_death_row; struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct ipv6_txoptions *opt; struct dst_entry *dst; struct flowi6 fl6; int addr_type; int err; if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (usin->sin6_family != AF_INET6) return -EAFNOSUPPORT; memset(&fl6, 0, sizeof(fl6)); if (inet6_test_bit(SNDFLOW, sk)) { fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK; IP6_ECN_flow_init(fl6.flowlabel); if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { struct ip6_flowlabel *flowlabel; flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (IS_ERR(flowlabel)) return -EINVAL; fl6_sock_release(flowlabel); } } /* * connect() to INADDR_ANY means loopback (BSD'ism). */ if (ipv6_addr_any(&usin->sin6_addr)) { if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), &usin->sin6_addr); else usin->sin6_addr = in6addr_loopback; } addr_type = ipv6_addr_type(&usin->sin6_addr); if (addr_type & IPV6_ADDR_MULTICAST) return -ENETUNREACH; if (addr_type&IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && usin->sin6_scope_id) { /* If interface is set while binding, indices * must coincide. 
*/ if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) return -EINVAL; sk->sk_bound_dev_if = usin->sin6_scope_id; } /* Connect to link-local address requires an interface */ if (!sk->sk_bound_dev_if) return -EINVAL; } if (tp->rx_opt.ts_recent_stamp && !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) { tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; WRITE_ONCE(tp->write_seq, 0); } sk->sk_v6_daddr = usin->sin6_addr; np->flow_label = fl6.flowlabel; /* * TCP over IPv4 */ if (addr_type & IPV6_ADDR_MAPPED) { u32 exthdrlen = icsk->icsk_ext_hdr_len; struct sockaddr_in sin; if (ipv6_only_sock(sk)) return -ENETUNREACH; sin.sin_family = AF_INET; sin.sin_port = usin->sin6_port; sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */ WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped); if (sk_is_mptcp(sk)) mptcpv6_handle_mapped(sk, true); sk->sk_backlog_rcv = tcp_v4_do_rcv; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) tp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif err = tcp_v4_connect(sk, (struct sockaddr_unsized *)&sin, sizeof(sin)); if (err) { icsk->icsk_ext_hdr_len = exthdrlen; /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */ WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific); if (sk_is_mptcp(sk)) mptcpv6_handle_mapped(sk, false); sk->sk_backlog_rcv = tcp_v6_do_rcv; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) tp->af_specific = &tcp_sock_ipv6_specific; #endif goto failure; } np->saddr = sk->sk_v6_rcv_saddr; return err; } if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) saddr = &sk->sk_v6_rcv_saddr; fl6.flowi6_proto = IPPROTO_TCP; fl6.daddr = sk->sk_v6_daddr; fl6.saddr = saddr ? *saddr : np->saddr; fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; if (IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) && !fl6.fl6_sport) fl6.flowi6_flags = FLOWI_FLAG_ANY_SPORT; fl6.flowi6_uid = sk_uid(sk); opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); final_p = fl6_update_dst(&fl6, opt, &final); security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; } tp->tcp_usec_ts = dst_tcp_usec_ts(dst); tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; if (!saddr) { saddr = &fl6.saddr; err = inet_bhash2_update_saddr(sk, saddr, AF_INET6); if (err) goto failure; } /* set the source address */ np->saddr = *saddr; inet->inet_rcv_saddr = LOOPBACK4_IPV6; sk->sk_gso_type = SKB_GSO_TCPV6; ip6_dst_store(sk, dst, false, false); icsk->icsk_ext_hdr_len = psp_sk_overhead(sk); if (opt) icsk->icsk_ext_hdr_len += opt->opt_flen + opt->opt_nflen; tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); inet->inet_dport = usin->sin6_port; tcp_set_state(sk, TCP_SYN_SENT); err = inet6_hash_connect(tcp_death_row, sk); if (err) goto late_failure; sk_set_txhash(sk); if (likely(!tp->repair)) { if (!tp->write_seq) WRITE_ONCE(tp->write_seq, secure_tcpv6_seq(np->saddr.s6_addr32, sk->sk_v6_daddr.s6_addr32, inet->inet_sport, inet->inet_dport)); tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32, sk->sk_v6_daddr.s6_addr32); } if (tcp_fastopen_defer_connect(sk, &err)) return err; if (err) goto late_failure; err = tcp_connect(sk); if (err) goto late_failure; return 0; late_failure: tcp_set_state(sk, TCP_CLOSE); inet_bhash2_reset_saddr(sk); failure: inet->inet_dport = 0; 
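/* Both failure paths converge here: clear the destination port and
 * (below) the cached route capabilities so that a later connect()
 * attempt starts from a clean state.
 */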
sk->sk_route_caps = 0; return err; } static void tcp_v6_mtu_reduced(struct sock *sk) { struct dst_entry *dst; u32 mtu; if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return; mtu = READ_ONCE(tcp_sk(sk)->mtu_info); /* Drop requests trying to increase our current mss. * Check done in __ip6_rt_update_pmtu() is too late. */ if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache) return; dst = inet6_csk_update_pmtu(sk, mtu); if (!dst) return; if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { tcp_sync_mss(sk, dst_mtu(dst)); tcp_simple_retransmit(sk); } } static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); struct net *net = dev_net_rcu(skb->dev); struct request_sock *fastopen; struct ipv6_pinfo *np; struct tcp_sock *tp; __u32 seq, snd_una; struct sock *sk; bool fatal; int err; sk = __inet6_lookup_established(net, &hdr->daddr, th->dest, &hdr->saddr, ntohs(th->source), skb->dev->ifindex, inet6_sdif(skb)); if (!sk) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return -ENOENT; } if (sk->sk_state == TCP_TIME_WAIT) { /* To increase the counter of ignored icmps for TCP-AO */ tcp_ao_ignore_icmp(sk, AF_INET6, type, code); inet_twsk_put(inet_twsk(sk)); return 0; } seq = ntohl(th->seq); fatal = icmpv6_err_convert(type, code, &err); if (sk->sk_state == TCP_NEW_SYN_RECV) { tcp_req_err(sk, seq, fatal); return 0; } if (tcp_ao_ignore_icmp(sk, AF_INET6, type, code)) { sock_put(sk); return 0; } bh_lock_sock(sk); if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == TCP_CLOSE) goto out; if (static_branch_unlikely(&ip6_min_hopcount)) { /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */ if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out; } } tp = tcp_sk(sk); /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = rcu_dereference(tp->fastopen_rsk); snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; if (sk->sk_state != TCP_LISTEN && !between(seq, snd_una, tp->snd_nxt)) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } np = tcp_inet6_sk(sk); if (type == NDISC_REDIRECT) { if (!sock_owned_by_user(sk)) { struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); if (dst) dst->ops->redirect(dst, sk, skb); } goto out; } if (type == ICMPV6_PKT_TOOBIG) { u32 mtu = ntohl(info); /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented). */ if (sk->sk_state == TCP_LISTEN) goto out; if (!ip6_sk_accept_pmtu(sk)) goto out; if (mtu < IPV6_MIN_MTU) goto out; WRITE_ONCE(tp->mtu_info, mtu); if (!sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk); else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags)) sock_hold(sk); goto out; } /* Might be for an request_sock */ switch (sk->sk_state) { case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. If a fast open socket is * already accepted it is treated as a connected one below. 
*/ if (fastopen && !fastopen->sk) break; ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th); if (!sock_owned_by_user(sk)) tcp_done_with_error(sk, err); else WRITE_ONCE(sk->sk_err_soft, err); goto out; case TCP_LISTEN: break; default: /* check if this ICMP message allows revert of backoff. * (see RFC 6069) */ if (!fastopen && type == ICMPV6_DEST_UNREACH && code == ICMPV6_NOROUTE) tcp_ld_RTO_revert(sk, seq); } if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) { WRITE_ONCE(sk->sk_err, err); sk_error_report(sk); } else { WRITE_ONCE(sk->sk_err_soft, err); } out: bh_unlock_sock(sk); sock_put(sk); return 0; } static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type, struct sk_buff *syn_skb) { struct inet_request_sock *ireq = inet_rsk(req); const struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct ipv6_txoptions *opt; struct flowi6 *fl6 = &fl->u.ip6; struct sk_buff *skb; int err = -ENOMEM; u8 tclass; /* First, grab a route. */ if (!dst && (dst = inet6_csk_route_req(sk, fl6, req, IPPROTO_TCP)) == NULL) goto done; skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb); if (skb) { tcp_rsk(req)->syn_ect_snt = np->tclass & INET_ECN_MASK; __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, &ireq->ir_v6_rmt_addr); fl6->daddr = ireq->ir_v6_rmt_addr; if (inet6_test_bit(REPFLOW, sk) && ireq->pktopts) fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ? (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | (np->tclass & INET_ECN_MASK) : np->tclass; if (!INET_ECN_is_capable(tclass) && tcp_bpf_ca_needs_ecn((struct sock *)req)) tclass |= INET_ECN_ECT_0; rcu_read_lock(); opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark), opt, tclass, READ_ONCE(sk->sk_priority)); rcu_read_unlock(); err = net_xmit_eval(err); } done: return err; } static void tcp_v6_reqsk_destructor(struct request_sock *req) { kfree(inet_rsk(req)->ipv6_opt); consume_skb(inet_rsk(req)->pktopts); } #ifdef CONFIG_TCP_MD5SIG static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, const struct in6_addr *addr, int l3index) { return tcp_md5_do_lookup(sk, l3index, (union tcp_md5_addr *)addr, AF_INET6); } static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk, const struct sock *addr_sk) { int l3index; l3index = l3mdev_master_ifindex_by_index(sock_net(sk), addr_sk->sk_bound_dev_if); return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr, l3index); } static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, sockptr_t optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; union tcp_ao_addr *addr; int l3index = 0; u8 prefixlen; bool l3flag; u8 flags; if (optlen < sizeof(cmd)) return -EINVAL; if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin6->sin6_family != AF_INET6) return -EINVAL; flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; if (optname == TCP_MD5SIG_EXT && cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) { prefixlen = cmd.tcpm_prefixlen; if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) && prefixlen > 32)) return -EINVAL; } else { prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 
32 : 128; } if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex && cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex); if (dev && netif_is_l3_master(dev)) l3index = dev->ifindex; rcu_read_unlock(); /* ok to reference set/not set outside of rcu; * right now device MUST be an L3 master */ if (!dev || !l3index) return -EINVAL; } if (!cmd.tcpm_keylen) { if (ipv6_addr_v4mapped(&sin6->sin6_addr)) return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], AF_INET, prefixlen, l3index, flags); return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr, AF_INET6, prefixlen, l3index, flags); } if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) return -EINVAL; if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { addr = (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3]; /* Don't allow keys for peers that have a matching TCP-AO key. * See the comment in tcp_ao_add_cmd() */ if (tcp_ao_required(sk, addr, AF_INET, l3flag ? l3index : -1, false)) return -EKEYREJECTED; return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags, cmd.tcpm_key, cmd.tcpm_keylen); } addr = (union tcp_md5_addr *)&sin6->sin6_addr; /* Don't allow keys for peers that have a matching TCP-AO key. * See the comment in tcp_ao_add_cmd() */ if (tcp_ao_required(sk, addr, AF_INET6, l3flag ? l3index : -1, false)) return -EKEYREJECTED; return tcp_md5_do_add(sk, addr, AF_INET6, prefixlen, l3index, flags, cmd.tcpm_key, cmd.tcpm_keylen); } static void tcp_v6_md5_hash_headers(struct md5_ctx *ctx, const struct in6_addr *daddr, const struct in6_addr *saddr, const struct tcphdr *th, int nbytes) { struct { struct tcp6_pseudohdr ip; /* TCP pseudo-header (RFC2460) */ struct tcphdr tcp; } h; h.ip.saddr = *saddr; h.ip.daddr = *daddr; h.ip.protocol = cpu_to_be32(IPPROTO_TCP); h.ip.len = cpu_to_be32(nbytes); h.tcp = *th; h.tcp.check = 0; md5_update(ctx, (const u8 *)&h, sizeof(h.ip) + sizeof(h.tcp)); } static noinline_for_stack void tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, const struct in6_addr *daddr, struct in6_addr *saddr, const struct tcphdr *th) { struct md5_ctx ctx; md5_init(&ctx); tcp_v6_md5_hash_headers(&ctx, daddr, saddr, th, th->doff << 2); tcp_md5_hash_key(&ctx, key); md5_final(&ctx, md5_hash); } static noinline_for_stack void tcp_v6_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, const struct sock *sk, const struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); const struct in6_addr *saddr, *daddr; struct md5_ctx ctx; if (sk) { /* valid for establish/request sockets */ saddr = &sk->sk_v6_rcv_saddr; daddr = &sk->sk_v6_daddr; } else { const struct ipv6hdr *ip6h = ipv6_hdr(skb); saddr = &ip6h->saddr; daddr = &ip6h->daddr; } md5_init(&ctx); tcp_v6_md5_hash_headers(&ctx, daddr, saddr, th, skb->len); tcp_md5_hash_skb_data(&ctx, skb, th->doff << 2); tcp_md5_hash_key(&ctx, key); md5_final(&ctx, md5_hash); } #endif static void tcp_v6_init_req(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb, u32 tw_isn) { bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); struct inet_request_sock *ireq = inet_rsk(req); const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener); ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; ireq->ir_rmt_addr = LOOPBACK4_IPV6; ireq->ir_loc_addr = LOOPBACK4_IPV6; /* So that link locals have meaning */ if ((!sk_listener->sk_bound_dev_if || l3_slave) && ipv6_addr_type(&ireq->ir_v6_rmt_addr) & 
IPV6_ADDR_LINKLOCAL) ireq->ir_iif = tcp_v6_iif(skb); if (!tw_isn && (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim || inet6_test_bit(REPFLOW, sk_listener))) { refcount_inc(&skb->users); ireq->pktopts = skb; } } static struct dst_entry *tcp_v6_route_req(const struct sock *sk, struct sk_buff *skb, struct flowi *fl, struct request_sock *req, u32 tw_isn) { tcp_v6_init_req(req, sk, skb, tw_isn); if (security_inet_conn_request(sk, skb, req)) return NULL; return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP); } struct request_sock_ops tcp6_request_sock_ops __read_mostly = { .family = AF_INET6, .obj_size = sizeof(struct tcp6_request_sock), .send_ack = tcp_v6_reqsk_send_ack, .destructor = tcp_v6_reqsk_destructor, .send_reset = tcp_v6_send_reset, }; const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr), #ifdef CONFIG_TCP_MD5SIG .req_md5_lookup = tcp_v6_md5_lookup, .calc_md5_hash = tcp_v6_md5_hash_skb, #endif #ifdef CONFIG_TCP_AO .ao_lookup = tcp_v6_ao_lookup_rsk, .ao_calc_key = tcp_v6_ao_calc_key_rsk, .ao_synack_hash = tcp_v6_ao_synack_hash, #endif #ifdef CONFIG_SYN_COOKIES .cookie_init_seq = cookie_v6_init_sequence, #endif .route_req = tcp_v6_route_req, .init_seq = tcp_v6_init_seq, .init_ts_off = tcp_v6_init_ts_off, .send_synack = tcp_v6_send_synack, }; static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, int rst, u8 tclass, __be32 label, u32 priority, u32 txhash, struct tcp_key *key) { struct net *net = sk ? sock_net(sk) : skb_dst_dev_net_rcu(skb); unsigned int tot_len = sizeof(struct tcphdr); struct sock *ctl_sk = net->ipv6.tcp_sk; const struct tcphdr *th = tcp_hdr(skb); __be32 mrst = 0, *topt; struct dst_entry *dst; struct sk_buff *buff; struct tcphdr *t1; struct flowi6 fl6; u32 mark = 0; if (tsecr) tot_len += TCPOLEN_TSTAMP_ALIGNED; if (tcp_key_is_md5(key)) tot_len += TCPOLEN_MD5SIG_ALIGNED; if (tcp_key_is_ao(key)) tot_len += tcp_ao_len_aligned(key->ao_key); #ifdef CONFIG_MPTCP if (rst && !tcp_key_is_md5(key)) { mrst = mptcp_reset_option(skb); if (mrst) tot_len += sizeof(__be32); } #endif buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); if (!buff) return; skb_reserve(buff, MAX_TCP_HEADER); t1 = skb_push(buff, tot_len); skb_reset_transport_header(buff); /* Swap the send and the receive. 
*/ memset(t1, 0, sizeof(*t1)); t1->dest = th->source; t1->source = th->dest; t1->doff = tot_len / 4; t1->seq = htonl(seq); t1->ack_seq = htonl(ack); t1->ack = !rst || !th->ack; t1->rst = rst; t1->window = htons(win); topt = (__be32 *)(t1 + 1); if (tsecr) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); *topt++ = htonl(tsval); *topt++ = htonl(tsecr); } if (mrst) *topt++ = mrst; #ifdef CONFIG_TCP_MD5SIG if (tcp_key_is_md5(key)) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); tcp_v6_md5_hash_hdr((__u8 *)topt, key->md5_key, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, t1); } #endif #ifdef CONFIG_TCP_AO if (tcp_key_is_ao(key)) { *topt++ = htonl((TCPOPT_AO << 24) | (tcp_ao_len(key->ao_key) << 16) | (key->ao_key->sndid << 8) | (key->rcv_next)); tcp_ao_hash_hdr(AF_INET6, (char *)topt, key->ao_key, key->traffic_key, (union tcp_ao_addr *)&ipv6_hdr(skb)->saddr, (union tcp_ao_addr *)&ipv6_hdr(skb)->daddr, t1, key->sne); } #endif memset(&fl6, 0, sizeof(fl6)); fl6.daddr = ipv6_hdr(skb)->saddr; fl6.saddr = ipv6_hdr(skb)->daddr; fl6.flowlabel = label; buff->ip_summed = CHECKSUM_PARTIAL; __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); fl6.flowi6_proto = IPPROTO_TCP; if (rt6_need_strict(&fl6.daddr) && !oif) fl6.flowi6_oif = tcp_v6_iif(skb); else { if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) oif = skb->skb_iif; fl6.flowi6_oif = oif; } if (sk) { /* unconstify the socket only to attach it to buff with care. */ skb_set_owner_edemux(buff, (struct sock *)sk); psp_reply_set_decrypted(sk, buff); if (sk->sk_state == TCP_TIME_WAIT) mark = inet_twsk(sk)->tw_mark; else mark = READ_ONCE(sk->sk_mark); skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC); } if (txhash) { /* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */ skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4); } fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark; fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); /* Pass a socket to ip6_dst_lookup either it is for RST * Underlying function will use this to retrieve the network * namespace */ if (sk && sk->sk_state != TCP_TIME_WAIT) dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/ else dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass, priority); TCP_INC_STATS(net, TCP_MIB_OUTSEGS); if (rst) TCP_INC_STATS(net, TCP_MIB_OUTRSTS); return; } kfree_skb(buff); } static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb, enum sk_rst_reason reason) { const struct tcphdr *th = tcp_hdr(skb); struct ipv6hdr *ipv6h = ipv6_hdr(skb); const __u8 *md5_hash_location = NULL; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) bool allocated_traffic_key = false; #endif const struct tcp_ao_hdr *aoh; struct tcp_key key = {}; u32 seq = 0, ack_seq = 0; __be32 label = 0; u32 priority = 0; struct net *net; u32 txhash = 0; int oif = 0; #ifdef CONFIG_TCP_MD5SIG unsigned char newhash[16]; struct sock *sk1 = NULL; #endif if (th->rst) return; /* If sk not NULL, it means we did a successful lookup and incoming * route had to be correct. prequeue might have dropped our dst. */ if (!sk && !ipv6_unicast_destination(skb)) return; net = sk ? 
sock_net(sk) : skb_dst_dev_net_rcu(skb); /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(th, &md5_hash_location, &aoh)) return; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) rcu_read_lock(); #endif #ifdef CONFIG_TCP_MD5SIG if (sk && sk_fullsock(sk)) { int l3index; /* sdif set, means packet ingressed via a device * in an L3 domain and inet_iif is set to it. */ l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index); if (key.md5_key) key.type = TCP_KEY_MD5; } else if (md5_hash_location) { int dif = tcp_v6_iif_l3_slave(skb); int sdif = tcp_v6_sdif(skb); int l3index; /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. */ sk1 = inet6_lookup_listener(net, NULL, 0, &ipv6h->saddr, th->source, &ipv6h->daddr, ntohs(th->source), dif, sdif); if (!sk1) goto out; /* sdif set, means packet ingressed via a device * in an L3 domain and dif is set to it. */ l3index = tcp_v6_sdif(skb) ? dif : 0; key.md5_key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index); if (!key.md5_key) goto out; key.type = TCP_KEY_MD5; tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb); if (memcmp(md5_hash_location, newhash, 16) != 0) goto out; } #endif if (th->ack) seq = ntohl(th->ack_seq); else ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2); #ifdef CONFIG_TCP_AO if (aoh) { int l3index; l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, seq, &key.ao_key, &key.traffic_key, &allocated_traffic_key, &key.rcv_next, &key.sne)) goto out; key.type = TCP_KEY_AO; } #endif if (sk) { oif = sk->sk_bound_dev_if; if (sk_fullsock(sk)) { if (inet6_test_bit(REPFLOW, sk)) label = ip6_flowlabel(ipv6h); priority = READ_ONCE(sk->sk_priority); txhash = sk->sk_txhash; } if (sk->sk_state == TCP_TIME_WAIT) { label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel); priority = inet_twsk(sk)->tw_priority; txhash = inet_twsk(sk)->tw_txhash; } } else { if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET) label = ip6_flowlabel(ipv6h); } trace_tcp_send_reset(sk, skb, reason); tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1, ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK, label, priority, txhash, &key); #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) out: if (allocated_traffic_key) kfree(key.traffic_key); rcu_read_unlock(); #endif } static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_key *key, u8 tclass, __be32 label, u32 priority, u32 txhash) { tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0, tclass, label, priority, txhash, key); } static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb, enum tcp_tw_status tw_status) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); u8 tclass = tw->tw_tclass; struct tcp_key key = {}; if (tw_status == TCP_TW_ACK_OOW) tclass &= ~INET_ECN_MASK; #ifdef CONFIG_TCP_AO struct tcp_ao_info *ao_info; if (static_branch_unlikely(&tcp_ao_needed.key)) { /* FIXME: the segment to-be-acked is not verified yet */ ao_info = rcu_dereference(tcptw->ao_info); if (ao_info) { const struct tcp_ao_hdr *aoh; /* Invalid TCP option size or twice included auth */ if 
(tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) goto out; if (aoh) key.ao_key = tcp_ao_established_key(sk, ao_info, aoh->rnext_keyid, -1); } } if (key.ao_key) { struct tcp_ao_key *rnext_key; key.traffic_key = snd_other_key(key.ao_key); /* rcv_next switches to our rcv_next */ rnext_key = READ_ONCE(ao_info->rnext_key); key.rcv_next = rnext_key->rcvid; key.sne = READ_ONCE(ao_info->snd_sne); key.type = TCP_KEY_AO; #else if (0) { #endif #ifdef CONFIG_TCP_MD5SIG } else if (static_branch_unlikely(&tcp_md5_needed.key)) { key.md5_key = tcp_twsk_md5_key(tcptw); if (key.md5_key) key.type = TCP_KEY_MD5; #endif } tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, READ_ONCE(tcptw->tw_rcv_nxt), tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_tw_tsval(tcptw), READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if, &key, tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority, tw->tw_txhash); #ifdef CONFIG_TCP_AO out: #endif inet_twsk_put(tw); } static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { struct tcp_key key = {}; #ifdef CONFIG_TCP_AO if (static_branch_unlikely(&tcp_ao_needed.key) && tcp_rsk_used_ao(req)) { const struct in6_addr *addr = &ipv6_hdr(skb)->saddr; const struct tcp_ao_hdr *aoh; int l3index; l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) return; if (!aoh) return; key.ao_key = tcp_ao_do_lookup(sk, l3index, (union tcp_ao_addr *)addr, AF_INET6, aoh->rnext_keyid, -1); if (unlikely(!key.ao_key)) { /* Send ACK with any matching MKT for the peer */ key.ao_key = tcp_ao_do_lookup(sk, l3index, (union tcp_ao_addr *)addr, AF_INET6, -1, -1); /* Matching key disappeared (user removed the key?) * let the handshake timeout. */ if (!key.ao_key) { net_info_ratelimited("TCP-AO key for (%pI6, %d)->(%pI6, %d) suddenly disappeared, won't ACK new connection\n", addr, ntohs(tcp_hdr(skb)->source), &ipv6_hdr(skb)->daddr, ntohs(tcp_hdr(skb)->dest)); return; } } key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC); if (!key.traffic_key) return; key.type = TCP_KEY_AO; key.rcv_next = aoh->keyid; tcp_v6_ao_calc_key_rsk(key.ao_key, key.traffic_key, req); #else if (0) { #endif #ifdef CONFIG_TCP_MD5SIG } else if (static_branch_unlikely(&tcp_md5_needed.key)) { int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index); if (key.md5_key) key.type = TCP_KEY_MD5; #endif } /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? 
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, tcp_rsk(req)->rcv_nxt, tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale, tcp_rsk_tsval(tcp_rsk(req)), req->ts_recent, sk->sk_bound_dev_if, &key, ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK, 0, READ_ONCE(sk->sk_priority), READ_ONCE(tcp_rsk(req)->txhash)); if (tcp_key_is_ao(&key)) kfree(key.traffic_key); } static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb) { #ifdef CONFIG_SYN_COOKIES const struct tcphdr *th = tcp_hdr(skb); if (!th->syn) sk = cookie_v6_check(sk, skb); #endif return sk; } u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph, struct tcphdr *th, u32 *cookie) { u16 mss = 0; #ifdef CONFIG_SYN_COOKIES mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops, &tcp_request_sock_ipv6_ops, sk, th); if (mss) { *cookie = __cookie_v6_init_sequence(iph, th, &mss); tcp_synq_overflow(sk); } #endif return mss; } static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); if (!ipv6_unicast_destination(skb)) goto drop; if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); return 0; } return tcp_conn_request(&tcp6_request_sock_ops, &tcp_request_sock_ipv6_ops, sk, skb); drop: tcp_listendrop(sk); return 0; /* don't send reset */ } static void tcp_v6_restore_cb(struct sk_buff *skb) { /* We need to move header back to the beginning if xfrm6_policy_check() * and tcp_v6_fill_cb() are going to be called again. * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. */ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, sizeof(struct inet6_skb_parm)); } static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req) { struct inet_request_sock *ireq; struct ipv6_pinfo *newnp; const struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct ipv6_txoptions *opt; struct inet_sock *newinet; bool found_dup_sk = false; struct tcp_sock *newtp; struct sock *newsk; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; int l3index; #endif struct flowi6 fl6; if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); if (!newsk) return NULL; newinet = inet_sk(newsk); newinet->pinet6 = tcp_inet6_sk(newsk); newinet->ipv6_fl_list = NULL; newnp = tcp_inet6_sk(newsk); newtp = tcp_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newnp->saddr = newsk->sk_v6_rcv_saddr; inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; if (sk_is_mptcp(newsk)) mptcpv6_handle_mapped(newsk, true); newsk->sk_backlog_rcv = tcp_v4_do_rcv; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) newtp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet_iif(skb); newnp->mcast_hops = ip_hdr(skb)->ttl; newnp->rcv_flowinfo = 0; if (inet6_test_bit(REPFLOW, sk)) newnp->flow_label = 0; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, tcp_create_openreq_child now does this for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. 
*/ tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } ireq = inet_rsk(req); if (sk_acceptq_is_full(sk)) goto exit_overflow; if (!dst) { dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP); if (!dst) goto exit; } newsk = tcp_create_openreq_child(sk, req, skb); if (!newsk) goto exit_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, tcp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ newsk->sk_gso_type = SKB_GSO_TCPV6; inet6_sk_rx_dst_set(newsk, skb); newinet = inet_sk(newsk); newinet->pinet6 = tcp_inet6_sk(newsk); newinet->ipv6_fl_list = NULL; newinet->inet_opt = NULL; newtp = tcp_sk(newsk); newnp = tcp_inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ip6_dst_store(newsk, dst, false, false); newnp->saddr = ireq->ir_v6_loc_addr; /* Now IPv6 options... First: no IPv4 options. */ newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = tcp_v6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); if (inet6_test_bit(REPFLOW, sk)) newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); /* Set ToS of the new socket based upon the value of incoming SYN. * ECT bits are set later in tcp_init_transfer(). */ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)) newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; /* Clone native IPv6 options from listening socket (if any) Yes, keeping reference count would be much more clever, but we make one more one thing there: reattach optmem to newsk. */ opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); if (opt) { opt = ipv6_dup_options(newsk, opt); RCU_INIT_POINTER(newnp->opt, opt); } inet_csk(newsk)->icsk_ext_hdr_len = 0; if (opt) inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + opt->opt_flen; tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst)); tcp_initialize_rcv_mss(newsk); #ifdef CONFIG_TCP_MD5SIG l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); if (!tcp_rsk_used_ao(req)) { /* Copy over the MD5 key from the original socket */ key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index); if (key) { const union tcp_md5_addr *addr; addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr; if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) goto put_and_exit; } } #endif #ifdef CONFIG_TCP_AO /* Copy over tcp_ao_info if any */ if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6)) goto put_and_exit; /* OOM */ #endif if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), &found_dup_sk); if (*own_req) { tcp_move_syn(newtp, req); /* Clone pktoptions received with SYN, if we own the req */ if (ireq->pktopts) { newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk); consume_skb(ireq->pktopts); ireq->pktopts = NULL; if (newnp->pktoptions) tcp_v6_restore_cb(newnp->pktoptions); } } else { if (!req_unhash && found_dup_sk) { /* This code path should only be executed in the * syncookie case only */ bh_unlock_sock(newsk); sock_put(newsk); newsk = NULL; } } return newsk; exit_overflow: __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit_nonewsk: dst_release(dst); exit: tcp_listendrop(sk); return NULL; put_and_exit: inet_csk_prepare_forced_close(newsk); 
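/* Tear down the half-initialized child: inet_csk_prepare_forced_close()
 * unlocks the clone-locked socket and drops its extra reference, after
 * which tcp_done() can release it.
 */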
tcp_done(newsk); goto exit; } INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, u32)); /* The socket must have it's spinlock held when we get * here, unless it is a TCP_LISTEN socket. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. */ INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct sk_buff *opt_skb = NULL; enum skb_drop_reason reason; struct tcp_sock *tp; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... Fortunately, tcp_rcv_established and rcv_established handle them correctly, but it is not case with tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK */ if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); reason = psp_sk_rx_policy_check(sk, skb); if (reason) goto err_discard; /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. --ANK (980728) */ if (np->rxopt.all && sk->sk_state != TCP_LISTEN) opt_skb = skb_clone_and_charge_r(skb, sk); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst; dst = rcu_dereference_protected(sk->sk_rx_dst, lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (sk->sk_rx_dst_ifindex != skb->skb_iif || INDIRECT_CALL_1(dst->ops->check, ip6_dst_check, dst, sk->sk_rx_dst_cookie) == NULL) { RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); } } tcp_rcv_established(sk, skb); if (opt_skb) goto ipv6_pktoptions; return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v6_cookie_check(sk, skb); if (nsk != sk) { if (nsk) { reason = tcp_child_process(sk, nsk, skb); if (reason) goto reset; } return 0; } } else sock_rps_save_rxhash(sk, skb); reason = tcp_rcv_state_process(sk, skb); if (reason) goto reset; if (opt_skb) goto ipv6_pktoptions; return 0; reset: tcp_v6_send_reset(sk, skb, sk_rst_convert_drop_reason(reason)); discard: if (opt_skb) __kfree_skb(opt_skb); sk_skb_reason_drop(sk, skb, reason); return 0; csum_err: reason = SKB_DROP_REASON_TCP_CSUM; trace_tcp_bad_csum(skb); TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); err_discard: TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; ipv6_pktoptions: /* Do you ask, what is it? 1. skb was enqueued by tcp. 2. skb is added to tail of read queue, rather than out of order. 3. socket is not in passive state. 4. Finally, it really contains options, which user wants to receive. 
*/ tp = tcp_sk(sk); if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) WRITE_ONCE(np->mcast_oif, tcp_v6_iif(opt_skb)); if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) WRITE_ONCE(np->mcast_hops, ipv6_hdr(opt_skb)->hop_limit); if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass) np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb)); if (inet6_test_bit(REPFLOW, sk)) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { __kfree_skb(opt_skb); opt_skb = xchg(&np->pktoptions, NULL); } } consume_skb(opt_skb); return 0; } static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, const struct tcphdr *th) { /* This is tricky: we move IP6CB at its correct location into * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because * _decode_session6() uses IP6CB(). * barrier() makes sure compiler won't play aliasing games. */ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb), sizeof(struct inet6_skb_parm)); barrier(); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->tcp_flags = tcp_flags_ntohs(th); TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); TCP_SKB_CB(skb)->sacked = 0; TCP_SKB_CB(skb)->has_rxtstamp = skb->tstamp || skb_hwtstamps(skb)->hwtstamp; } INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) { struct net *net = dev_net_rcu(skb->dev); enum skb_drop_reason drop_reason; enum tcp_tw_status tw_status; int sdif = inet6_sdif(skb); int dif = inet6_iif(skb); const struct tcphdr *th; const struct ipv6hdr *hdr; struct sock *sk = NULL; bool refcounted; int ret; u32 isn; drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (skb->pkt_type != PACKET_HOST) goto discard_it; /* * Count it even if it's bad. */ __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) { drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL; goto bad_packet; } if (!pskb_may_pull(skb, th->doff*4)) goto discard_it; if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo)) goto csum_error; th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); lookup: sk = __inet6_lookup_skb(skb, __tcp_hdrlen(th), th->source, th->dest, inet6_iif(skb), sdif, &refcounted); if (!sk) goto no_tcp_socket; if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); bool req_stolen = false; struct sock *nsk; sk = req->rsk_listener; if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) drop_reason = SKB_DROP_REASON_XFRM_POLICY; else drop_reason = tcp_inbound_hash(sk, req, skb, &hdr->saddr, &hdr->daddr, AF_INET6, dif, sdif); if (drop_reason) { sk_drops_skbadd(sk, skb); reqsk_put(req); goto discard_it; } if (tcp_checksum_complete(skb)) { reqsk_put(req); goto csum_error; } if (unlikely(sk->sk_state != TCP_LISTEN)) { nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb); if (!nsk) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } sk = nsk; /* reuseport_migrate_sock() has already held one sk_refcnt * before returning. 
*/ } else { sock_hold(sk); } refcounted = true; nsk = NULL; if (!tcp_filter(sk, skb, &drop_reason)) { th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); tcp_v6_fill_cb(skb, hdr, th); nsk = tcp_check_req(sk, skb, req, false, &req_stolen, &drop_reason); } if (!nsk) { reqsk_put(req); if (req_stolen) { /* Another cpu got exclusive access to req * and created a full blown socket. * Try to feed this packet to this socket * instead of discarding it. */ tcp_v6_restore_cb(skb); sock_put(sk); goto lookup; } goto discard_and_relse; } nf_reset_ct(skb); if (nsk == sk) { reqsk_put(req); tcp_v6_restore_cb(skb); } else { drop_reason = tcp_child_process(sk, nsk, skb); if (drop_reason) { enum sk_rst_reason rst_reason; rst_reason = sk_rst_convert_drop_reason(drop_reason); tcp_v6_send_reset(nsk, skb, rst_reason); goto discard_and_relse; } sock_put(sk); return 0; } } process: if (static_branch_unlikely(&ip6_min_hopcount)) { /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */ if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); drop_reason = SKB_DROP_REASON_TCP_MINTTL; goto discard_and_relse; } } if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { drop_reason = SKB_DROP_REASON_XFRM_POLICY; goto discard_and_relse; } drop_reason = tcp_inbound_hash(sk, NULL, skb, &hdr->saddr, &hdr->daddr, AF_INET6, dif, sdif); if (drop_reason) goto discard_and_relse; nf_reset_ct(skb); if (tcp_filter(sk, skb, &drop_reason)) goto discard_and_relse; th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); tcp_v6_fill_cb(skb, hdr, th); skb->dev = NULL; if (sk->sk_state == TCP_LISTEN) { ret = tcp_v6_do_rcv(sk, skb); goto put_and_return; } sk_incoming_cpu_update(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { ret = tcp_v6_do_rcv(sk, skb); } else { if (tcp_add_backlog(sk, skb, &drop_reason)) goto discard_and_relse; } bh_unlock_sock(sk); put_and_return: if (refcounted) sock_put(sk); return ret ? 
-1 : 0; no_tcp_socket: drop_reason = SKB_DROP_REASON_NO_SOCKET; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; tcp_v6_fill_cb(skb, hdr, th); if (tcp_checksum_complete(skb)) { csum_error: drop_reason = SKB_DROP_REASON_TCP_CSUM; trace_tcp_bad_csum(skb); __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v6_send_reset(NULL, skb, sk_rst_convert_drop_reason(drop_reason)); } discard_it: SKB_DR_OR(drop_reason, NOT_SPECIFIED); sk_skb_reason_drop(sk, skb, drop_reason); return 0; discard_and_relse: sk_drops_skbadd(sk, skb); if (refcounted) sock_put(sk); goto discard_it; do_time_wait: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { drop_reason = SKB_DROP_REASON_XFRM_POLICY; inet_twsk_put(inet_twsk(sk)); goto discard_it; } tcp_v6_fill_cb(skb, hdr, th); if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; } tw_status = tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn, &drop_reason); switch (tw_status) { case TCP_TW_SYN: { struct sock *sk2; sk2 = inet6_lookup_listener(net, skb, __tcp_hdrlen(th), &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, ntohs(th->dest), tcp_v6_iif_l3_slave(skb), sdif); if (sk2) { struct inet_timewait_sock *tw = inet_twsk(sk); inet_twsk_deschedule_put(tw); sk = sk2; tcp_v6_restore_cb(skb); refcounted = false; __this_cpu_write(tcp_tw_isn, isn); goto process; } drop_reason = psp_twsk_rx_policy_check(inet_twsk(sk), skb); if (drop_reason) break; } /* to ACK */ fallthrough; case TCP_TW_ACK: case TCP_TW_ACK_OOW: tcp_v6_timewait_ack(sk, skb, tw_status); break; case TCP_TW_RST: tcp_v6_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET); inet_twsk_deschedule_put(inet_twsk(sk)); goto discard_it; case TCP_TW_SUCCESS: ; } goto discard_it; } void tcp_v6_early_demux(struct sk_buff *skb) { struct net *net = dev_net_rcu(skb->dev); const struct ipv6hdr *hdr; const struct tcphdr *th; struct sock *sk; if (skb->pkt_type != PACKET_HOST) return; if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) return; hdr = ipv6_hdr(skb); th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr) / 4) return; /* Note : We use inet6_iif() here, not tcp_v6_iif() */ sk = __inet6_lookup_established(net, &hdr->saddr, th->source, &hdr->daddr, ntohs(th->dest), inet6_iif(skb), inet6_sdif(skb)); if (sk) { skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, sk->sk_rx_dst_cookie); if (dst && sk->sk_rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } } static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp6_timewait_sock), }; INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) { __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr); } const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, .sk_rx_dst_set = inet6_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct ipv6hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .mtu_reduced = tcp_v6_mtu_reduced, }; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { #ifdef CONFIG_TCP_MD5SIG .md5_lookup = tcp_v6_md5_lookup, .calc_md5_hash = tcp_v6_md5_hash_skb, .md5_parse = 
tcp_v6_parse_md5_keys, #endif #ifdef CONFIG_TCP_AO .ao_lookup = tcp_v6_ao_lookup, .calc_ao_hash = tcp_v6_ao_hash_skb, .ao_parse = tcp_v6_parse_ao, .ao_calc_key_sk = tcp_v6_ao_calc_key_sk, #endif }; #endif /* * TCP over IPv4 via INET6 API */ static const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .mtu_reduced = tcp_v4_mtu_reduced, }; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { #ifdef CONFIG_TCP_MD5SIG .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, #endif #ifdef CONFIG_TCP_AO .ao_lookup = tcp_v6_ao_lookup, .calc_ao_hash = tcp_v4_ao_hash_skb, .ao_parse = tcp_v6_parse_ao, .ao_calc_key_sk = tcp_v4_ao_calc_key_sk, #endif }; static void tcp6_destruct_sock(struct sock *sk) { tcp_md5_destruct_sock(sk); tcp_ao_destroy_sock(sk, false); inet6_sock_destruct(sk); } #endif /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. */ static int tcp_v6_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_init_sock(sk); icsk->icsk_af_ops = &ipv6_specific; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; sk->sk_destruct = tcp6_destruct_sock; #endif return 0; } #ifdef CONFIG_PROC_FS /* Proc filesystem TCPv6 sock list dumping. */ static void get_openreq6(struct seq_file *seq, const struct request_sock *req, int i) { long ttd = req->rsk_timer.expires - jiffies; const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr; const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr; if (ttd < 0) ttd = 0; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], inet_rsk(req)->ir_num, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], ntohs(inet_rsk(req)->ir_rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. 
*/ 1, /* timers active (only the expire timer) */ jiffies_to_clock_t(ttd), req->num_timeout, from_kuid_munged(seq_user_ns(seq), sk_uid(req->rsk_listener)), 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); } static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) { const struct in6_addr *dest, *src; __u16 destp, srcp; int timer_active; unsigned long timer_expires; const struct inet_sock *inet = inet_sk(sp); const struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; u8 icsk_pending; int rx_queue; int state; dest = &sp->sk_v6_daddr; src = &sp->sk_v6_rcv_saddr; destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); icsk_pending = smp_load_acquire(&icsk->icsk_pending); if (icsk_pending == ICSK_TIME_RETRANS || icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = tcp_timeout_expires(sp); } else if (icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = tcp_timeout_expires(sp); } else if (timer_pending(&icsk->icsk_keepalive_timer)) { timer_active = 2; timer_expires = icsk->icsk_keepalive_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } state = inet_sk_state_load(sp); if (state == TCP_LISTEN) rx_queue = READ_ONCE(sp->sk_ack_backlog); else /* Because we don't lock the socket, * we might find a transient negative value. */ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq), 0); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, state, READ_ONCE(tp->write_seq) - tp->snd_una, rx_queue, timer_active, jiffies_delta_to_clock_t(timer_expires - jiffies), READ_ONCE(icsk->icsk_retransmits), from_kuid_munged(seq_user_ns(seq), sk_uid(sp)), READ_ONCE(icsk->icsk_probes_out), sock_i_ino(sp), refcount_read(&sp->sk_refcnt), sp, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp), tcp_snd_cwnd(tp), state == TCP_LISTEN ? fastopenq->max_qlen : (tcp_in_initial_slowstart(tp) ? 
-1 : tp->snd_ssthresh) ); } static void get_timewait6_sock(struct seq_file *seq, struct inet_timewait_sock *tw, int i) { long delta = tw->tw_timer.expires - jiffies; const struct in6_addr *dest, *src; __u16 destp, srcp; dest = &tw->tw_v6_daddr; src = &tw->tw_v6_rcv_saddr; destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, READ_ONCE(tw->tw_substate), 0, 0, 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, refcount_read(&tw->tw_refcnt), tw); } static int tcp6_seq_show(struct seq_file *seq, void *v) { struct tcp_iter_state *st; struct sock *sk = v; if (v == SEQ_START_TOKEN) { seq_puts(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode\n"); goto out; } st = seq->private; if (sk->sk_state == TCP_TIME_WAIT) get_timewait6_sock(seq, v, st->num); else if (sk->sk_state == TCP_NEW_SYN_RECV) get_openreq6(seq, v, st->num); else get_tcp6_sock(seq, v, st->num); out: return 0; } static const struct seq_operations tcp6_seq_ops = { .show = tcp6_seq_show, .start = tcp_seq_start, .next = tcp_seq_next, .stop = tcp_seq_stop, }; static struct tcp_seq_afinfo tcp6_seq_afinfo = { .family = AF_INET6, }; int __net_init tcp6_proc_init(struct net *net) { if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops, sizeof(struct tcp_iter_state), &tcp6_seq_afinfo)) return -ENOMEM; return 0; } void tcp6_proc_exit(struct net *net) { remove_proc_entry("tcp6", net->proc_net); } #endif struct proto tcpv6_prot = { .name = "TCPv6", .owner = THIS_MODULE, .close = tcp_close, .pre_connect = tcp_v6_pre_connect, .connect = tcp_v6_connect, .disconnect = tcp_disconnect, .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v6_init_sock, .destroy = tcp_v4_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt, .keepalive = tcp_set_keepalive, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .splice_eof = tcp_splice_eof, .backlog_rcv = tcp_v6_do_rcv, .release_cb = tcp_release_cb, .hash = inet_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .put_port = inet_put_port, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = tcp_bpf_update_proto, #endif .enter_memory_pressure = tcp_enter_memory_pressure, .leave_memory_pressure = tcp_leave_memory_pressure, .stream_memory_free = tcp_stream_memory_free, .sockets_allocated = &tcp_sockets_allocated, .memory_allocated = &net_aligned_data.tcp_memory_allocated, .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc, .memory_pressure = &tcp_memory_pressure, .sysctl_mem = sysctl_tcp_mem, .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp6_sock), .ipv6_pinfo_offset = offsetof(struct tcp6_sock, inet6), .slab_flags = SLAB_TYPESAFE_BY_RCU, .twsk_prot = &tcp6_timewait_sock_ops, .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = NULL, .no_autobind = true, .diag_destroy = tcp_abort, }; EXPORT_SYMBOL_GPL(tcpv6_prot); static struct inet_protosw tcpv6_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_TCP, .prot = &tcpv6_prot, .ops = &inet6_stream_ops, .flags = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK, }; static 
int __net_init tcpv6_net_init(struct net *net) { int res; res = inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, SOCK_RAW, IPPROTO_TCP, net); if (!res) net->ipv6.tcp_sk->sk_clockid = CLOCK_MONOTONIC; return res; } static void __net_exit tcpv6_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.tcp_sk); } static struct pernet_operations tcpv6_net_ops = { .init = tcpv6_net_init, .exit = tcpv6_net_exit, }; int __init tcpv6_init(void) { int ret; net_hotdata.tcpv6_protocol = (struct inet6_protocol) { .handler = tcp_v6_rcv, .err_handler = tcp_v6_err, .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, }; ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP); if (ret) goto out; /* register inet6 protocol */ ret = inet6_register_protosw(&tcpv6_protosw); if (ret) goto out_tcpv6_protocol; ret = register_pernet_subsys(&tcpv6_net_ops); if (ret) goto out_tcpv6_protosw; ret = mptcpv6_init(); if (ret) goto out_tcpv6_pernet_subsys; out: return ret; out_tcpv6_pernet_subsys: unregister_pernet_subsys(&tcpv6_net_ops); out_tcpv6_protosw: inet6_unregister_protosw(&tcpv6_protosw); out_tcpv6_protocol: inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP); goto out; } void tcpv6_exit(void) { unregister_pernet_subsys(&tcpv6_net_ops); inet6_unregister_protosw(&tcpv6_protosw); inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP); }
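/*
 * Editor's note -- not part of the kernel source above: a minimal userspace
 * sketch, under the assumption that you want to consume the /proc/net/tcp6
 * rows emitted by get_tcp6_sock(). The sscanf() format mirrors the
 * "%08X%08X%08X%08X:%04X" seq_printf() columns; the four address words are
 * raw snapshots of s6_addr32[] and are deliberately not byte-swapped here.
 * Error handling is kept to a minimum.
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return 1;
	if (!fgets(line, sizeof(line), f)) {	/* skip the header row */
		fclose(f);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int laddr[4], raddr[4], lport, rport, state;

		/* matches "%4d: %08X...:%04X %08X...:%04X %02X" */
		if (sscanf(line, "%*d: %8X%8X%8X%8X:%4X %8X%8X%8X%8X:%4X %2X",
			   &laddr[0], &laddr[1], &laddr[2], &laddr[3], &lport,
			   &raddr[0], &raddr[1], &raddr[2], &raddr[3], &rport,
			   &state) == 11)
			printf("local port %u -> remote port %u, state 0x%02X\n",
			       lport, rport, state);
	}
	fclose(f);
	return 0;
}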
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright 2020 NXP */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/slab.h> #include <net/act_api.h> #include <net/netlink.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gate.h> #include <net/tc_wrapper.h> static struct tc_action_ops act_gate_ops; static ktime_t gate_get_time(struct tcf_gate *gact) { ktime_t mono = ktime_get(); switch (gact->tk_offset) { case TK_OFFS_MAX: return mono; default: return ktime_mono_to_any(mono, gact->tk_offset); } return KTIME_MAX; } static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start) { struct tcf_gate_params *param = &gact->param; ktime_t now, base, cycle; u64 n; base = ns_to_ktime(param->tcfg_basetime); now = gate_get_time(gact); if (ktime_after(base, now)) {
*start = base; return; } cycle = param->tcfg_cycletime; n = div64_u64(ktime_sub_ns(now, base), cycle); *start = ktime_add_ns(base, (n + 1) * cycle); } static void gate_start_timer(struct tcf_gate *gact, ktime_t start) { ktime_t expires; expires = hrtimer_get_expires(&gact->hitimer); if (expires == 0) expires = KTIME_MAX; start = min_t(ktime_t, start, expires); hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT); } static enum hrtimer_restart gate_timer_func(struct hrtimer *timer) { struct tcf_gate *gact = container_of(timer, struct tcf_gate, hitimer); struct tcf_gate_params *p = &gact->param; struct tcfg_gate_entry *next; ktime_t close_time, now; spin_lock(&gact->tcf_lock); next = gact->next_entry; /* cycle start, clear pending bit, clear total octets */ gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0; gact->current_entry_octets = 0; gact->current_max_octets = next->maxoctets; gact->current_close_time = ktime_add_ns(gact->current_close_time, next->interval); close_time = gact->current_close_time; if (list_is_last(&next->list, &p->entries)) next = list_first_entry(&p->entries, struct tcfg_gate_entry, list); else next = list_next_entry(next, list); now = gate_get_time(gact); if (ktime_after(now, close_time)) { ktime_t cycle, base; u64 n; cycle = p->tcfg_cycletime; base = ns_to_ktime(p->tcfg_basetime); n = div64_u64(ktime_sub_ns(now, base), cycle); close_time = ktime_add_ns(base, (n + 1) * cycle); } gact->next_entry = next; hrtimer_set_expires(&gact->hitimer, close_time); spin_unlock(&gact->tcf_lock); return HRTIMER_RESTART; } TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_gate *gact = to_gate(a); int action = READ_ONCE(gact->tcf_action); tcf_lastuse_update(&gact->tcf_tm); tcf_action_update_bstats(&gact->common, skb); spin_lock(&gact->tcf_lock); if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) { spin_unlock(&gact->tcf_lock); return action; } if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN)) { spin_unlock(&gact->tcf_lock); goto drop; } if (gact->current_max_octets >= 0) { gact->current_entry_octets += qdisc_pkt_len(skb); if (gact->current_entry_octets > gact->current_max_octets) { spin_unlock(&gact->tcf_lock); goto overlimit; } } spin_unlock(&gact->tcf_lock); return action; overlimit: tcf_action_inc_overlimit_qstats(&gact->common); drop: tcf_action_inc_drop_qstats(&gact->common); return TC_ACT_SHOT; } static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = { [TCA_GATE_ENTRY_INDEX] = { .type = NLA_U32 }, [TCA_GATE_ENTRY_GATE] = { .type = NLA_FLAG }, [TCA_GATE_ENTRY_INTERVAL] = { .type = NLA_U32 }, [TCA_GATE_ENTRY_IPV] = { .type = NLA_S32 }, [TCA_GATE_ENTRY_MAX_OCTETS] = { .type = NLA_S32 }, }; static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = { [TCA_GATE_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)), [TCA_GATE_PRIORITY] = { .type = NLA_S32 }, [TCA_GATE_ENTRY_LIST] = { .type = NLA_NESTED }, [TCA_GATE_BASE_TIME] = { .type = NLA_U64 }, [TCA_GATE_CYCLE_TIME] = { .type = NLA_U64 }, [TCA_GATE_CYCLE_TIME_EXT] = { .type = NLA_U64 }, [TCA_GATE_FLAGS] = { .type = NLA_U32 }, [TCA_GATE_CLOCKID] = { .type = NLA_S32 }, }; static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry, struct netlink_ext_ack *extack) { u32 interval = 0; entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]); if (tb[TCA_GATE_ENTRY_INTERVAL]) interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]); if (interval == 0) { NL_SET_ERR_MSG(extack, "Invalid interval for 
schedule entry"); return -EINVAL; } entry->interval = interval; entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1); entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS], -1); return 0; } static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry, int index, struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { }; int err; err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack); if (err < 0) { NL_SET_ERR_MSG(extack, "Could not parse nested entry"); return -EINVAL; } entry->index = index; return fill_gate_entry(tb, entry, extack); } static void release_entry_list(struct list_head *entries) { struct tcfg_gate_entry *entry, *e; list_for_each_entry_safe(entry, e, entries, list) { list_del(&entry->list); kfree(entry); } } static int parse_gate_list(struct nlattr *list_attr, struct tcf_gate_params *sched, struct netlink_ext_ack *extack) { struct tcfg_gate_entry *entry; struct nlattr *n; int err, rem; int i = 0; if (!list_attr) return -EINVAL; nla_for_each_nested(n, list_attr, rem) { if (nla_type(n) != TCA_GATE_ONE_ENTRY) { NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'"); continue; } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { NL_SET_ERR_MSG(extack, "Not enough memory for entry"); err = -ENOMEM; goto release_list; } err = parse_gate_entry(n, entry, i, extack); if (err < 0) { kfree(entry); goto release_list; } list_add_tail(&entry->list, &sched->entries); i++; } sched->num_entries = i; return i; release_list: release_entry_list(&sched->entries); return err; } static void gate_setup_timer(struct tcf_gate *gact, u64 basetime, enum tk_offsets tko, s32 clockid, bool do_init) { if (!do_init) { if (basetime == gact->param.tcfg_basetime && tko == gact->tk_offset && clockid == gact->param.tcfg_clockid) return; spin_unlock_bh(&gact->tcf_lock); hrtimer_cancel(&gact->hitimer); spin_lock_bh(&gact->tcf_lock); } gact->param.tcfg_basetime = basetime; gact->param.tcfg_clockid = clockid; gact->tk_offset = tko; hrtimer_setup(&gact->hitimer, gate_timer_func, clockid, HRTIMER_MODE_ABS_SOFT); } static int tcf_gate_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id); enum tk_offsets tk_offset = TK_OFFS_TAI; bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_GATE_MAX + 1]; struct tcf_chain *goto_ch = NULL; u64 cycletime = 0, basetime = 0; struct tcf_gate_params *p; s32 clockid = CLOCK_TAI; struct tcf_gate *gact; struct tc_gate *parm; int ret = 0, err; u32 gflags = 0; s32 prio = -1; ktime_t start; u32 index; if (!nla) return -EINVAL; err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack); if (err < 0) return err; if (!tb[TCA_GATE_PARMS]) return -EINVAL; if (tb[TCA_GATE_CLOCKID]) { clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]); switch (clockid) { case CLOCK_REALTIME: tk_offset = TK_OFFS_REAL; break; case CLOCK_MONOTONIC: tk_offset = TK_OFFS_MAX; break; case CLOCK_BOOTTIME: tk_offset = TK_OFFS_BOOT; break; case CLOCK_TAI: tk_offset = TK_OFFS_TAI; break; default: NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); return -EINVAL; } } parm = nla_data(tb[TCA_GATE_PARMS]); index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) return err; if (err && bind) return ACT_P_BOUND; if (!err) { ret = tcf_idr_create_from_flags(tn, index, est, a, &act_gate_ops, bind, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret 
= ACT_P_CREATED; } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) { tcf_idr_release(*a, bind); return -EEXIST; } if (tb[TCA_GATE_PRIORITY]) prio = nla_get_s32(tb[TCA_GATE_PRIORITY]); if (tb[TCA_GATE_BASE_TIME]) basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]); if (tb[TCA_GATE_FLAGS]) gflags = nla_get_u32(tb[TCA_GATE_FLAGS]); gact = to_gate(*a); if (ret == ACT_P_CREATED) INIT_LIST_HEAD(&gact->param.entries); err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; spin_lock_bh(&gact->tcf_lock); p = &gact->param; if (tb[TCA_GATE_CYCLE_TIME]) cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]); if (tb[TCA_GATE_ENTRY_LIST]) { err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack); if (err < 0) goto chain_put; } if (!cycletime) { struct tcfg_gate_entry *entry; ktime_t cycle = 0; list_for_each_entry(entry, &p->entries, list) cycle = ktime_add_ns(cycle, entry->interval); cycletime = cycle; if (!cycletime) { err = -EINVAL; goto chain_put; } } p->tcfg_cycletime = cycletime; if (tb[TCA_GATE_CYCLE_TIME_EXT]) p->tcfg_cycletime_ext = nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]); gate_setup_timer(gact, basetime, tk_offset, clockid, ret == ACT_P_CREATED); p->tcfg_priority = prio; p->tcfg_flags = gflags; gate_get_start_time(gact, &start); gact->current_close_time = start; gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING; gact->next_entry = list_first_entry(&p->entries, struct tcfg_gate_entry, list); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); gate_start_timer(gact, start); spin_unlock_bh(&gact->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); return ret; chain_put: spin_unlock_bh(&gact->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: /* action is not inserted in any list: it's safe to init hitimer * without taking tcf_lock. 
*/ if (ret == ACT_P_CREATED) gate_setup_timer(gact, gact->param.tcfg_basetime, gact->tk_offset, gact->param.tcfg_clockid, true); tcf_idr_release(*a, bind); return err; } static void tcf_gate_cleanup(struct tc_action *a) { struct tcf_gate *gact = to_gate(a); struct tcf_gate_params *p; p = &gact->param; hrtimer_cancel(&gact->hitimer); release_entry_list(&p->entries); } static int dumping_entry(struct sk_buff *skb, struct tcfg_gate_entry *entry) { struct nlattr *item; item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY); if (!item) return -ENOSPC; if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index)) goto nla_put_failure; if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval)) goto nla_put_failure; if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets)) goto nla_put_failure; if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv)) goto nla_put_failure; return nla_nest_end(skb, item); nla_put_failure: nla_nest_cancel(skb, item); return -1; } static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_gate *gact = to_gate(a); struct tc_gate opt = { .index = gact->tcf_index, .refcnt = refcount_read(&gact->tcf_refcnt) - ref, .bindcnt = atomic_read(&gact->tcf_bindcnt) - bind, }; struct tcfg_gate_entry *entry; struct tcf_gate_params *p; struct nlattr *entry_list; struct tcf_t t; spin_lock_bh(&gact->tcf_lock); opt.action = gact->tcf_action; p = &gact->param; if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME, p->tcfg_basetime, TCA_GATE_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME, p->tcfg_cycletime, TCA_GATE_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT, p->tcfg_cycletime_ext, TCA_GATE_PAD)) goto nla_put_failure; if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid)) goto nla_put_failure; if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags)) goto nla_put_failure; if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority)) goto nla_put_failure; entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST); if (!entry_list) goto nla_put_failure; list_for_each_entry(entry, &p->entries, list) { if (dumping_entry(skb, entry) < 0) goto nla_put_failure; } nla_nest_end(skb, entry_list); tcf_tm_dump(&t, &gact->tcf_tm); if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD)) goto nla_put_failure; spin_unlock_bh(&gact->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&gact->tcf_lock); nlmsg_trim(skb, b); return -1; } static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { struct tcf_gate *gact = to_gate(a); struct tcf_t *tm = &gact->tcf_tm; tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } static size_t tcf_gate_get_fill_size(const struct tc_action *act) { return nla_total_size(sizeof(struct tc_gate)); } static void tcf_gate_entry_destructor(void *priv) { struct action_gate_entry *oe = priv; kfree(oe); } static int tcf_gate_get_entries(struct flow_action_entry *entry, const struct tc_action *act) { entry->gate.entries = tcf_gate_get_list(act); if (!entry->gate.entries) return -EINVAL; entry->destructor = tcf_gate_entry_destructor; entry->destructor_priv = entry->gate.entries; return 0; } static int tcf_gate_offload_act_setup(struct tc_action *act, void 
*entry_data, u32 *index_inc, bool bind, struct netlink_ext_ack *extack) { int err; if (bind) { struct flow_action_entry *entry = entry_data; entry->id = FLOW_ACTION_GATE; entry->gate.prio = tcf_gate_prio(act); entry->gate.basetime = tcf_gate_basetime(act); entry->gate.cycletime = tcf_gate_cycletime(act); entry->gate.cycletimeext = tcf_gate_cycletimeext(act); entry->gate.num_entries = tcf_gate_num_entries(act); err = tcf_gate_get_entries(entry, act); if (err) return err; *index_inc = 1; } else { struct flow_offload_action *fl_action = entry_data; fl_action->id = FLOW_ACTION_GATE; } return 0; } static struct tc_action_ops act_gate_ops = { .kind = "gate", .id = TCA_ID_GATE, .owner = THIS_MODULE, .act = tcf_gate_act, .dump = tcf_gate_dump, .init = tcf_gate_init, .cleanup = tcf_gate_cleanup, .stats_update = tcf_gate_stats_update, .get_fill_size = tcf_gate_get_fill_size, .offload_act_setup = tcf_gate_offload_act_setup, .size = sizeof(struct tcf_gate), }; MODULE_ALIAS_NET_ACT("gate"); static __net_init int gate_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id); return tc_action_net_init(net, tn, &act_gate_ops); } static void __net_exit gate_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_gate_ops.net_id); } static struct pernet_operations gate_net_ops = { .init = gate_init_net, .exit_batch = gate_exit_net, .id = &act_gate_ops.net_id, .size = sizeof(struct tc_action_net), }; static int __init gate_init_module(void) { return tcf_register_action(&act_gate_ops, &gate_net_ops); } static void __exit gate_cleanup_module(void) { tcf_unregister_action(&act_gate_ops, &gate_net_ops); } module_init(gate_init_module); module_exit(gate_cleanup_module); MODULE_DESCRIPTION("TC gate action"); MODULE_LICENSE("GPL v2");
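/*
 * Editor's note -- not part of act_gate.c: a standalone userspace sketch of
 * the cycle arithmetic shared by gate_get_start_time() and gate_timer_func()
 * above. With base time B, cycle length C and current time t (all in ns),
 * n = (t - B) / C whole cycles have elapsed, so the next cycle boundary is
 * B + (n + 1) * C; if B is still in the future it is used directly. The real
 * code does the same with ktime_t and div64_u64().
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t next_cycle_start(uint64_t base, uint64_t cycle, uint64_t now)
{
	uint64_t n;

	/* cycle must be non-zero; the action rejects a zero cycletime */
	if (base > now)			/* schedule has not started yet */
		return base;
	n = (now - base) / cycle;	/* whole cycles already elapsed */
	return base + (n + 1) * cycle;
}

int main(void)
{
	/* base 1.0s, cycle 250ms, now 1.6s -> next start at 1.75s */
	printf("%llu\n", (unsigned long long)
	       next_cycle_start(1000000000ULL, 250000000ULL, 1600000000ULL));
	return 0;
}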
// SPDX-License-Identifier: GPL-2.0-or-later /************************************************************ * EFI GUID Partition Table handling * * http://www.uefi.org/specs/ * http://www.intel.com/technology/efi/ * * efi.[ch] by Matt Domsch <Matt_Domsch@dell.com> * Copyright 2000,2001,2002,2004 Dell Inc. * * TODO: * * Changelog: * Mon August 5th, 2013 Davidlohr Bueso <davidlohr@hp.com> * - detect hybrid MBRs, tighter pMBR checking & cleanups.
* * Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com> * - test for valid PMBR and valid PGPT before ever reading * AGPT, allow override with 'gpt' kernel command line option. * - check for first/last_usable_lba outside of size of disk * * Tue Mar 26 2002 Matt Domsch <Matt_Domsch@dell.com> * - Ported to 2.5.7-pre1 and 2.5.7-dj2 * - Applied patch to avoid fault in alternate header handling * - cleaned up find_valid_gpt * - On-disk structure and copy in memory is *always* LE now - * swab fields as needed * - remove print_gpt_header() * - only use first max_p partition entries, to keep the kernel minor number * and partition numbers tied. * * Mon Feb 04 2002 Matt Domsch <Matt_Domsch@dell.com> * - Removed __PRIPTR_PREFIX - not being used * * Mon Jan 14 2002 Matt Domsch <Matt_Domsch@dell.com> * - Ported to 2.5.2-pre11 + library crc32 patch Linus applied * * Thu Dec 6 2001 Matt Domsch <Matt_Domsch@dell.com> * - Added compare_gpts(). * - moved le_efi_guid_to_cpus() back into this file. GPT is the only * thing that keeps EFI GUIDs on disk. * - Changed gpt structure names and members to be simpler and more Linux-like. * * Wed Oct 17 2001 Matt Domsch <Matt_Domsch@dell.com> * - Removed CONFIG_DEVFS_VOLUMES_UUID code entirely per Martin Wilck * * Wed Oct 10 2001 Matt Domsch <Matt_Domsch@dell.com> * - Changed function comments to DocBook style per Andreas Dilger suggestion. * * Mon Oct 08 2001 Matt Domsch <Matt_Domsch@dell.com> * - Change read_lba() to use the page cache per Al Viro's work. * - print u64s properly on all architectures * - fixed debug_printk(), now Dprintk() * * Mon Oct 01 2001 Matt Domsch <Matt_Domsch@dell.com> * - Style cleanups * - made most functions static * - Endianness addition * - remove test for second alternate header, as it's not per spec, * and is unnecessary. There's now a method to read/write the last * sector of an odd-sized disk from user space. No tools have ever * been released which used this code, so it's effectively dead. * - Per Asit Mallick of Intel, added a test for a valid PMBR. * - Added kernel command line option 'gpt' to override valid PMBR test. * * Wed Jun 6 2001 Martin Wilck <Martin.Wilck@Fujitsu-Siemens.com> * - added devfs volume UUID support (/dev/volumes/uuids) for * mounting file systems by the partition GUID. * * Tue Dec 5 2000 Matt Domsch <Matt_Domsch@dell.com> * - Moved crc32() to linux/lib, added efi_crc32(). * * Thu Nov 30 2000 Matt Domsch <Matt_Domsch@dell.com> * - Replaced Intel's CRC32 function with an equivalent * non-license-restricted version. * * Wed Oct 25 2000 Matt Domsch <Matt_Domsch@dell.com> * - Fixed the last_lba() call to return the proper last block * * Thu Oct 12 2000 Matt Domsch <Matt_Domsch@dell.com> * - Thanks to Andries Brouwer for his debugging assistance. * - Code works, detects all the partitions. * ************************************************************/ #include <linux/kernel.h> #include <linux/crc32.h> #include <linux/ctype.h> #include <linux/math64.h> #include <linux/slab.h> #include "check.h" #include "efi.h" /* This allows a kernel command line option 'gpt' to override * the test for invalid PMBR. Not __initdata because reloading * the partition tables happens after init too. 
*/ static int force_gpt; static int __init force_gpt_fn(char *str) { force_gpt = 1; return 1; } __setup("gpt", force_gpt_fn); /** * efi_crc32() - EFI version of crc32 function * @buf: buffer to calculate crc32 of * @len: length of buf * * Description: Returns EFI-style CRC32 value for @buf * * This function uses the little endian Ethernet polynomial * but seeds the function with ~0, and xor's with ~0 at the end. * Note, the EFI Specification, v1.02, has a reference to * Dr. Dobbs Journal, May 1994 (actually it's in May 1992). */ static inline u32 efi_crc32(const void *buf, unsigned long len) { return (crc32(~0L, buf, len) ^ ~0L); } /** * last_lba(): return number of last logical block of device * @disk: block device * * Description: Returns last LBA value on success, 0 on error. * This is stored (by sd and ide-geometry) in * the part[0] entry for this disk, and is the number of * physical sectors available on the disk. */ static u64 last_lba(struct gendisk *disk) { return div_u64(bdev_nr_bytes(disk->part0), queue_logical_block_size(disk->queue)) - 1ULL; } static inline int pmbr_part_valid(gpt_mbr_record *part) { if (part->os_type != EFI_PMBR_OSTYPE_EFI_GPT) goto invalid; /* set to 0x00000001 (i.e., the LBA of the GPT Partition Header) */ if (le32_to_cpu(part->starting_lba) != GPT_PRIMARY_PARTITION_TABLE_LBA) goto invalid; return GPT_MBR_PROTECTIVE; invalid: return 0; } /** * is_pmbr_valid(): test Protective MBR for validity * @mbr: pointer to a legacy mbr structure * @total_sectors: amount of sectors in the device * * Description: Checks for a valid protective or hybrid * master boot record (MBR). The validity of a pMBR depends * on all of the following properties: * 1) MSDOS signature is in the last two bytes of the MBR * 2) One partition of type 0xEE is found * * In addition, a hybrid MBR will have up to three additional * primary partitions, which point to the same space that's * marked out by up to three GPT partitions. * * Returns 0 upon invalid MBR, or GPT_MBR_PROTECTIVE or * GPT_MBR_HYBRID depending on the device layout. */ static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors) { uint32_t sz = 0; int i, part = 0, ret = 0; /* invalid by default */ if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE) goto done; for (i = 0; i < 4; i++) { ret = pmbr_part_valid(&mbr->partition_record[i]); if (ret == GPT_MBR_PROTECTIVE) { part = i; /* * Ok, we at least know that there's a protective MBR, * now check if there are other partition types for * hybrid MBR. */ goto check_hybrid; } } if (ret != GPT_MBR_PROTECTIVE) goto done; check_hybrid: for (i = 0; i < 4; i++) if ((mbr->partition_record[i].os_type != EFI_PMBR_OSTYPE_EFI_GPT) && (mbr->partition_record[i].os_type != 0x00)) ret = GPT_MBR_HYBRID; /* * Protective MBRs take up the lesser of the whole disk * or 2 TiB (32bit LBA), ignoring the rest of the disk. * Some partitioning programs, nonetheless, choose to set * the size to the maximum 32-bit limitation, disregarding * the disk size. * * Hybrid MBRs do not necessarily comply with this. * * Consider a bad value here to be a warning to support dd'ing * an image from a smaller disk to a larger disk. 
*/ if (ret == GPT_MBR_PROTECTIVE) { sz = le32_to_cpu(mbr->partition_record[part].size_in_lba); if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF) pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n", sz, (uint32_t)min(total_sectors - 1, 0xFFFFFFFF)); } done: return ret; } /** * read_lba(): Read bytes from disk, starting at given LBA * @state: disk parsed partitions * @lba: the Logical Block Address of the partition table * @buffer: destination buffer * @count: bytes to read * * Description: Reads @count bytes from @state->disk into @buffer. * Returns number of bytes read on success, 0 on error. */ static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer, size_t count) { size_t totalreadcount = 0; sector_t n = lba * (queue_logical_block_size(state->disk->queue) / 512); if (!buffer || lba > last_lba(state->disk)) return 0; while (count) { int copied = 512; Sector sect; unsigned char *data = read_part_sector(state, n++, &sect); if (!data) break; if (copied > count) copied = count; memcpy(buffer, data, copied); put_dev_sector(sect); buffer += copied; totalreadcount +=copied; count -= copied; } return totalreadcount; } /** * alloc_read_gpt_entries(): reads partition entries from disk * @state: disk parsed partitions * @gpt: GPT header * * Description: Returns ptes on success, NULL on error. * Allocates space for PTEs based on information found in @gpt. * Notes: remember to free pte when you're done! */ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state, gpt_header *gpt) { size_t count; gpt_entry *pte; if (!gpt) return NULL; count = (size_t)le32_to_cpu(gpt->num_partition_entries) * le32_to_cpu(gpt->sizeof_partition_entry); if (!count) return NULL; pte = kmalloc(count, GFP_KERNEL); if (!pte) return NULL; if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba), (u8 *) pte, count) < count) { kfree(pte); pte=NULL; return NULL; } return pte; } /** * alloc_read_gpt_header(): Allocates GPT header, reads into it from disk * @state: disk parsed partitions * @lba: the Logical Block Address of the partition table * * Description: returns GPT header on success, NULL on error. Allocates * and fills a GPT header starting at @ from @state->disk. * Note: remember to free gpt when finished with it. */ static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state, u64 lba) { gpt_header *gpt; unsigned ssz = queue_logical_block_size(state->disk->queue); gpt = kmalloc(ssz, GFP_KERNEL); if (!gpt) return NULL; if (read_lba(state, lba, (u8 *) gpt, ssz) < ssz) { kfree(gpt); gpt=NULL; return NULL; } return gpt; } /** * is_gpt_valid() - tests one GPT header and PTEs for validity * @state: disk parsed partitions * @lba: logical block address of the GPT header to test * @gpt: GPT header ptr, filled on return. * @ptes: PTEs ptr, filled on return. * * Description: returns 1 if valid, 0 on error. * If valid, returns pointers to newly allocated GPT header and PTEs. 
*/ static int is_gpt_valid(struct parsed_partitions *state, u64 lba, gpt_header **gpt, gpt_entry **ptes) { u32 crc, origcrc; u64 lastlba, pt_size; if (!ptes) return 0; if (!(*gpt = alloc_read_gpt_header(state, lba))) return 0; /* Check the GUID Partition Table signature */ if (le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE) { pr_debug("GUID Partition Table Header signature is wrong:" "%lld != %lld\n", (unsigned long long)le64_to_cpu((*gpt)->signature), (unsigned long long)GPT_HEADER_SIGNATURE); goto fail; } /* Check the GUID Partition Table header size is too big */ if (le32_to_cpu((*gpt)->header_size) > queue_logical_block_size(state->disk->queue)) { pr_debug("GUID Partition Table Header size is too large: %u > %u\n", le32_to_cpu((*gpt)->header_size), queue_logical_block_size(state->disk->queue)); goto fail; } /* Check the GUID Partition Table header size is too small */ if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) { pr_debug("GUID Partition Table Header size is too small: %u < %zu\n", le32_to_cpu((*gpt)->header_size), sizeof(gpt_header)); goto fail; } /* Check the GUID Partition Table CRC */ origcrc = le32_to_cpu((*gpt)->header_crc32); (*gpt)->header_crc32 = 0; crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size)); if (crc != origcrc) { pr_debug("GUID Partition Table Header CRC is wrong: %x != %x\n", crc, origcrc); goto fail; } (*gpt)->header_crc32 = cpu_to_le32(origcrc); /* Check that the my_lba entry points to the LBA that contains * the GUID Partition Table */ if (le64_to_cpu((*gpt)->my_lba) != lba) { pr_debug("GPT my_lba incorrect: %lld != %lld\n", (unsigned long long)le64_to_cpu((*gpt)->my_lba), (unsigned long long)lba); goto fail; } /* Check the first_usable_lba and last_usable_lba are * within the disk. 
*/ lastlba = last_lba(state->disk); if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) { pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n", (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba), (unsigned long long)lastlba); goto fail; } if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) { pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n", (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba), (unsigned long long)lastlba); goto fail; } if (le64_to_cpu((*gpt)->last_usable_lba) < le64_to_cpu((*gpt)->first_usable_lba)) { pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n", (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba), (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba)); goto fail; } /* Check that sizeof_partition_entry has the correct value */ if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) { pr_debug("GUID Partition Entry Size check failed.\n"); goto fail; } /* Sanity check partition table size */ pt_size = (u64)le32_to_cpu((*gpt)->num_partition_entries) * le32_to_cpu((*gpt)->sizeof_partition_entry); if (pt_size > KMALLOC_MAX_SIZE) { pr_debug("GUID Partition Table is too large: %llu > %lu bytes\n", (unsigned long long)pt_size, KMALLOC_MAX_SIZE); goto fail; } if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) goto fail; /* Check the GUID Partition Entry Array CRC */ crc = efi_crc32((const unsigned char *) (*ptes), pt_size); if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) { pr_debug("GUID Partition Entry Array CRC check failed.\n"); goto fail_ptes; } /* We're done, all's well */ return 1; fail_ptes: kfree(*ptes); *ptes = NULL; fail: kfree(*gpt); *gpt = NULL; return 0; } /** * is_pte_valid() - tests one PTE for validity * @pte:pte to check * @lastlba: last lba of the disk * * Description: returns 1 if valid, 0 on error. */ static inline int is_pte_valid(const gpt_entry *pte, const u64 lastlba) { if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) || le64_to_cpu(pte->starting_lba) > lastlba || le64_to_cpu(pte->ending_lba) > lastlba) return 0; return 1; } /** * compare_gpts() - Search disk for valid GPT headers and PTEs * @pgpt: primary GPT header * @agpt: alternate GPT header * @lastlba: last LBA number * * Description: Returns nothing. Sanity checks pgpt and agpt fields * and prints warnings on discrepancies. * */ static void compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba) { int error_found = 0; if (!pgpt || !agpt) return; if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) { pr_warn("GPT:Primary header LBA != Alt. header alternate_lba\n"); pr_warn("GPT:%lld != %lld\n", (unsigned long long)le64_to_cpu(pgpt->my_lba), (unsigned long long)le64_to_cpu(agpt->alternate_lba)); error_found++; } if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) { pr_warn("GPT:Primary header alternate_lba != Alt. 
header my_lba\n"); pr_warn("GPT:%lld != %lld\n", (unsigned long long)le64_to_cpu(pgpt->alternate_lba), (unsigned long long)le64_to_cpu(agpt->my_lba)); error_found++; } if (le64_to_cpu(pgpt->first_usable_lba) != le64_to_cpu(agpt->first_usable_lba)) { pr_warn("GPT:first_usable_lbas don't match.\n"); pr_warn("GPT:%lld != %lld\n", (unsigned long long)le64_to_cpu(pgpt->first_usable_lba), (unsigned long long)le64_to_cpu(agpt->first_usable_lba)); error_found++; } if (le64_to_cpu(pgpt->last_usable_lba) != le64_to_cpu(agpt->last_usable_lba)) { pr_warn("GPT:last_usable_lbas don't match.\n"); pr_warn("GPT:%lld != %lld\n", (unsigned long long)le64_to_cpu(pgpt->last_usable_lba), (unsigned long long)le64_to_cpu(agpt->last_usable_lba)); error_found++; } if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) { pr_warn("GPT:disk_guids don't match.\n"); error_found++; } if (le32_to_cpu(pgpt->num_partition_entries) != le32_to_cpu(agpt->num_partition_entries)) { pr_warn("GPT:num_partition_entries don't match: " "0x%x != 0x%x\n", le32_to_cpu(pgpt->num_partition_entries), le32_to_cpu(agpt->num_partition_entries)); error_found++; } if (le32_to_cpu(pgpt->sizeof_partition_entry) != le32_to_cpu(agpt->sizeof_partition_entry)) { pr_warn("GPT:sizeof_partition_entry values don't match: " "0x%x != 0x%x\n", le32_to_cpu(pgpt->sizeof_partition_entry), le32_to_cpu(agpt->sizeof_partition_entry)); error_found++; } if (le32_to_cpu(pgpt->partition_entry_array_crc32) != le32_to_cpu(agpt->partition_entry_array_crc32)) { pr_warn("GPT:partition_entry_array_crc32 values don't match: " "0x%x != 0x%x\n", le32_to_cpu(pgpt->partition_entry_array_crc32), le32_to_cpu(agpt->partition_entry_array_crc32)); error_found++; } if (le64_to_cpu(pgpt->alternate_lba) != lastlba) { pr_warn("GPT:Primary header thinks Alt. header is not at the end of the disk.\n"); pr_warn("GPT:%lld != %lld\n", (unsigned long long)le64_to_cpu(pgpt->alternate_lba), (unsigned long long)lastlba); error_found++; } if (le64_to_cpu(agpt->my_lba) != lastlba) { pr_warn("GPT:Alternate GPT header not at the end of the disk.\n"); pr_warn("GPT:%lld != %lld\n", (unsigned long long)le64_to_cpu(agpt->my_lba), (unsigned long long)lastlba); error_found++; } if (error_found) pr_warn("GPT: Use GNU Parted to correct GPT errors.\n"); return; } /** * find_valid_gpt() - Search disk for valid GPT headers and PTEs * @state: disk parsed partitions * @gpt: GPT header ptr, filled on return. * @ptes: PTEs ptr, filled on return. * * Description: Returns 1 if valid, 0 on error. * If valid, returns pointers to newly allocated GPT header and PTEs. * Validity depends on PMBR being valid (or being overridden by the * 'gpt' kernel command line option) and finding either the Primary * GPT header and PTEs valid, or the Alternate GPT header and PTEs * valid. If the Primary GPT header is not valid, the Alternate GPT header * is not checked unless the 'gpt' kernel command line option is passed. * This protects against devices which misreport their size, and forces * the user to decide to use the Alternate GPT. 
*/ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt, gpt_entry **ptes) { int good_pgpt = 0, good_agpt = 0, good_pmbr = 0; gpt_header *pgpt = NULL, *agpt = NULL; gpt_entry *pptes = NULL, *aptes = NULL; legacy_mbr *legacymbr; struct gendisk *disk = state->disk; const struct block_device_operations *fops = disk->fops; sector_t total_sectors = get_capacity(state->disk); u64 lastlba; if (!ptes) return 0; lastlba = last_lba(state->disk); if (!force_gpt) { /* This will be added to the EFI Spec. per Intel after v1.02. */ legacymbr = kzalloc(sizeof(*legacymbr), GFP_KERNEL); if (!legacymbr) goto fail; read_lba(state, 0, (u8 *)legacymbr, sizeof(*legacymbr)); good_pmbr = is_pmbr_valid(legacymbr, total_sectors); kfree(legacymbr); if (!good_pmbr) goto fail; pr_debug("Device has a %s MBR\n", good_pmbr == GPT_MBR_PROTECTIVE ? "protective" : "hybrid"); } good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA, &pgpt, &pptes); if (good_pgpt) good_agpt = is_gpt_valid(state, le64_to_cpu(pgpt->alternate_lba), &agpt, &aptes); if (!good_agpt && force_gpt) good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes); if (!good_agpt && force_gpt && fops->alternative_gpt_sector) { sector_t agpt_sector; int err; err = fops->alternative_gpt_sector(disk, &agpt_sector); if (!err) good_agpt = is_gpt_valid(state, agpt_sector, &agpt, &aptes); } /* The obviously unsuccessful case */ if (!good_pgpt && !good_agpt) goto fail; compare_gpts(pgpt, agpt, lastlba); /* The good cases */ if (good_pgpt) { *gpt = pgpt; *ptes = pptes; kfree(agpt); kfree(aptes); if (!good_agpt) pr_warn("Alternate GPT is invalid, using primary GPT.\n"); return 1; } else if (good_agpt) { *gpt = agpt; *ptes = aptes; kfree(pgpt); kfree(pptes); pr_warn("Primary GPT is invalid, using alternate GPT.\n"); return 1; } fail: kfree(pgpt); kfree(agpt); kfree(pptes); kfree(aptes); *gpt = NULL; *ptes = NULL; return 0; } /** * utf16_le_to_7bit(): Naively converts a UTF-16LE string to 7-bit ASCII characters * @in: input UTF-16LE string * @size: size of the input string * @out: output string ptr, should be capable to store @size+1 characters * * Description: Converts @size UTF16-LE symbols from @in string to 7-bit * ASCII characters and stores them to @out. Adds trailing zero to @out array. */ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out) { unsigned int i = 0; out[size] = 0; while (i < size) { u8 c = le16_to_cpu(in[i]) & 0x7f; if (c && !isprint(c)) c = '!'; out[i] = c; i++; } } /** * efi_partition - scan for GPT partitions * @state: disk parsed partitions * * Description: called from check.c, if the disk contains GPT * partitions, sets up partition entries in the kernel. * * If the first block on the disk is a legacy MBR, * it will get handled by msdos_partition(). * If it's a Protective MBR, we'll handle it here. * * We do not create a Linux partition for GPT, but * only for the actual data partitions. * Returns: * -1 if unable to read the partition table * 0 if this isn't our partition table * 1 if successful * */ int efi_partition(struct parsed_partitions *state) { gpt_header *gpt = NULL; gpt_entry *ptes = NULL; u32 i; unsigned ssz = queue_logical_block_size(state->disk->queue) / 512; if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) { kfree(gpt); kfree(ptes); return 0; } pr_debug("GUID Partition Table is valid! 
Yea!\n"); for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) { struct partition_meta_info *info; unsigned label_max; u64 start = le64_to_cpu(ptes[i].starting_lba); u64 size = le64_to_cpu(ptes[i].ending_lba) - le64_to_cpu(ptes[i].starting_lba) + 1ULL; if (!is_pte_valid(&ptes[i], last_lba(state->disk))) continue; put_partition(state, i+1, start * ssz, size * ssz); /* If this is a RAID volume, tell md */ if (!efi_guidcmp(ptes[i].partition_type_guid, PARTITION_LINUX_RAID_GUID)) state->parts[i + 1].flags = ADDPART_FLAG_RAID; info = &state->parts[i + 1].info; efi_guid_to_str(&ptes[i].unique_partition_guid, info->uuid); /* Naively convert UTF16-LE to 7 bits. */ label_max = min(ARRAY_SIZE(info->volname) - 1, ARRAY_SIZE(ptes[i].partition_name)); utf16_le_to_7bit(ptes[i].partition_name, label_max, info->volname); state->parts[i + 1].has_info = true; } kfree(ptes); kfree(gpt); strlcat(state->pp_buf, "\n", PAGE_SIZE); return 1; }
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		RAW - implementation of IP "raw" sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() fixed up
 *		Alan Cox	:	ICMP error handling
 *		Alan Cox	:	EMSGSIZE if you send too big a packet
 *		Alan Cox	:	Now uses generic datagrams and shared
 *					skbuff library. No more peek crashes,
 *					no more backlogs
 *		Alan Cox	:	Checks sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram/skb_copy_datagram
 *		Alan Cox	:	Raw passes ip options too
 *		Alan Cox	:	Setsocketopt added
 *		Alan Cox	:	Fixed error return for broadcasts
 *		Alan Cox	:	Removed wake_up calls
 *		Alan Cox	:	Use ttl/tos
 *		Alan Cox	:	Cleaned up old debugging
 *		Alan Cox	:	Use new kernel side addresses
 *	Arnt Gulbrandsen	:	Fixed MSG_DONTROUTE in raw sockets.
 *		Alan Cox	:	BSD style RAW socket demultiplexing.
 *		Alan Cox	:	Beginnings of mrouted support.
 *		Alan Cox	:	Added IP_HDRINCL option.
 *		Alan Cox	:	Skip broadcast check if BSDism set.
 *		David S. Miller	:	New socket lookup architecture.
 */
#include <linux/types.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/current.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/sockios.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/mroute.h>
#include <linux/netdevice.h>
#include <linux/in_route.h>
#include <linux/route.h>
#include <linux/skbuff.h>
#include <linux/igmp.h>
#include <net/net_namespace.h>
#include <net/dst.h>
#include <net/sock.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/snmp.h>
#include <net/tcp_states.h>
#include <net/inet_common.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/uio.h>

struct raw_frag_vec {
        struct msghdr *msg;
        union {
                struct icmphdr icmph;
                char c[1];
        } hdr;
        int hlen;
};

struct raw_hashinfo raw_v4_hashinfo;
EXPORT_SYMBOL_GPL(raw_v4_hashinfo);

int raw_hash_sk(struct sock *sk)
{
        struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
        struct hlist_head *hlist;

        hlist = &h->ht[raw_hashfunc(sock_net(sk), inet_sk(sk)->inet_num)];

        spin_lock(&h->lock);
        sk_add_node_rcu(sk, hlist);
        sock_set_flag(sk, SOCK_RCU_FREE);
        spin_unlock(&h->lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

        return 0;
}
EXPORT_SYMBOL_GPL(raw_hash_sk);

void raw_unhash_sk(struct sock *sk)
{
        struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;

        spin_lock(&h->lock);
        if (sk_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        spin_unlock(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_unhash_sk);

bool raw_v4_match(struct net *net, const struct sock *sk, unsigned short num,
                  __be32 raddr, __be32 laddr, int dif, int sdif)
{
        const struct inet_sock *inet = inet_sk(sk);

        if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
            !(inet->inet_daddr && inet->inet_daddr != raddr) &&
            !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
            raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
                return true;
        return false;
}
EXPORT_SYMBOL_GPL(raw_v4_match);

/*
 *	0 - deliver
 *	1 - block
 */
static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
{
        struct icmphdr _hdr;
        const struct icmphdr *hdr;

        hdr = skb_header_pointer(skb, skb_transport_offset(skb),
                                 sizeof(_hdr), &_hdr);
        if (!hdr)
                return 1;

        if (hdr->type < 32) {
                __u32 data = raw_sk(sk)->filter.data;

                return ((1U << hdr->type) & data) != 0;
        }

        /* Do not block unknown ICMP types */
        return 0;
}
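/*
 * Userspace view of the filter consulted by icmp_filter() above: a set bit
 * in icmp_filter.data blocks that ICMP type, a clear bit delivers it. A
 * minimal sketch in the style of ping(8), assuming an ICMP raw socket;
 * error handling is elided.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/icmp.h>

static int open_echo_reply_only_socket(void)
{
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
        struct icmp_filter filt;

        /* Block every type except echo reply. */
        filt.data = ~(1U << ICMP_ECHOREPLY);
        setsockopt(fd, SOL_RAW, ICMP_FILTER, &filt, sizeof(filt));
        return fd;
}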
/* IP input processing comes here for RAW socket delivery.
 * Caller owns SKB, so we must make clones.
 *
 * RFC 1122: SHOULD pass TOS value up to the transport layer.
 * -> It does. And not only TOS, but all IP header.
 */
static int raw_v4_input(struct net *net, struct sk_buff *skb,
                        const struct iphdr *iph, int hash)
{
        int sdif = inet_sdif(skb);
        struct hlist_head *hlist;
        int dif = inet_iif(skb);
        int delivered = 0;
        struct sock *sk;

        hlist = &raw_v4_hashinfo.ht[hash];
        rcu_read_lock();
        sk_for_each_rcu(sk, hlist) {
                if (!raw_v4_match(net, sk, iph->protocol,
                                  iph->saddr, iph->daddr, dif, sdif))
                        continue;

                if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
                        sk_drops_inc(sk);
                        continue;
                }

                delivered = 1;
                if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
                    ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
                                   skb->dev->ifindex, sdif)) {
                        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

                        /* Not releasing hash table! */
                        if (clone)
                                raw_rcv(sk, clone);
                }
        }
        rcu_read_unlock();
        return delivered;
}

int raw_local_deliver(struct sk_buff *skb, int protocol)
{
        struct net *net = dev_net(skb->dev);

        return raw_v4_input(net, skb, ip_hdr(skb),
                            raw_hashfunc(net, protocol));
}

static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
{
        struct inet_sock *inet = inet_sk(sk);
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        int harderr = 0;
        bool recverr;
        int err = 0;

        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
                ipv4_sk_update_pmtu(skb, sk, info);
        else if (type == ICMP_REDIRECT) {
                ipv4_sk_redirect(skb, sk);
                return;
        }

        /* Report error on raw socket, if:
           1. User requested ip_recverr.
           2. Socket is connected (otherwise the error indication
              is useless without ip_recverr and error is hard.
         */
        recverr = inet_test_bit(RECVERR, sk);
        if (!recverr && sk->sk_state != TCP_ESTABLISHED)
                return;

        switch (type) {
        default:
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        case ICMP_SOURCE_QUENCH:
                return;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                harderr = 1;
                break;
        case ICMP_DEST_UNREACH:
                err = EHOSTUNREACH;
                if (code > NR_ICMP_UNREACH)
                        break;
                if (code == ICMP_FRAG_NEEDED) {
                        harderr = READ_ONCE(inet->pmtudisc) != IP_PMTUDISC_DONT;
                        err = EMSGSIZE;
                } else {
                        err = icmp_err_convert[code].errno;
                        harderr = icmp_err_convert[code].fatal;
                }
        }

        if (recverr) {
                const struct iphdr *iph = (const struct iphdr *)skb->data;
                u8 *payload = skb->data + (iph->ihl << 2);

                if (inet_test_bit(HDRINCL, sk))
                        payload = skb->data;
                ip_icmp_error(sk, skb, err, 0, info, payload);
        }

        if (recverr || harderr) {
                sk->sk_err = err;
                sk_error_report(sk);
        }
}

void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
        struct net *net = dev_net(skb->dev);
        int dif = skb->dev->ifindex;
        int sdif = inet_sdif(skb);
        struct hlist_head *hlist;
        const struct iphdr *iph;
        struct sock *sk;
        int hash;

        hash = raw_hashfunc(net, protocol);
        hlist = &raw_v4_hashinfo.ht[hash];

        rcu_read_lock();
        sk_for_each_rcu(sk, hlist) {
                iph = (const struct iphdr *)skb->data;
                if (!raw_v4_match(net, sk, iph->protocol,
                                  iph->daddr, iph->saddr, dif, sdif))
                        continue;
                raw_err(sk, skb, info);
        }
        rcu_read_unlock();
}
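/*
 * Userspace counterpart of raw_err()/ip_icmp_error() above: with IP_RECVERR
 * enabled, queued ICMP errors are read back from the socket error queue as
 * IP_RECVERR ancillary data. A minimal sketch; error handling and use of
 * the returned payload are elided.
 */
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static void drain_icmp_errors(int fd)
{
        char cbuf[512], dbuf[1500];
        struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cm;
        int on = 1;

        setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
                return;
        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
                        struct sock_extended_err *ee =
                                (struct sock_extended_err *)CMSG_DATA(cm);
                        /* ee->ee_type / ee->ee_code carry the ICMP type and code */
                        (void)ee;
                }
        }
}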
static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        enum skb_drop_reason reason;

        /* Charge it to the socket. */
        ipv4_pktinfo_prepare(sk, skb, true);
        if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) {
                sk_skb_reason_drop(sk, skb, reason);
                return NET_RX_DROP;
        }

        return NET_RX_SUCCESS;
}

int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
                sk_drops_inc(sk);
                sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
                return NET_RX_DROP;
        }
        nf_reset_ct(skb);

        skb_push(skb, -skb_network_offset(skb));

        raw_rcv_skb(sk, skb);
        return 0;
}

static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
                           struct msghdr *msg, size_t length,
                           struct rtable **rtp, unsigned int flags,
                           const struct sockcm_cookie *sockc)
{
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct iphdr *iph;
        struct sk_buff *skb;
        unsigned int iphlen;
        int err;
        struct rtable *rt = *rtp;
        int hlen, tlen;

        if (length > rt->dst.dev->mtu) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
                               rt->dst.dev->mtu);
                return -EMSGSIZE;
        }
        if (length < sizeof(struct iphdr))
                return -EINVAL;

        if (flags&MSG_PROBE)
                goto out;

        hlen = LL_RESERVED_SPACE(rt->dst.dev);
        tlen = rt->dst.dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk,
                                  length + hlen + tlen + 15,
                                  flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto error;
        skb_reserve(skb, hlen);

        skb->protocol = htons(ETH_P_IP);
        skb->priority = sockc->priority;
        skb->mark = sockc->mark;
        skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid);
        skb_dst_set(skb, &rt->dst);
        *rtp = NULL;

        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        skb_put(skb, length);

        skb->ip_summed = CHECKSUM_NONE;

        skb_setup_tx_timestamp(skb, sockc);

        if (flags & MSG_CONFIRM)
                skb_set_dst_pending_confirm(skb, 1);

        skb->transport_header = skb->network_header;
        err = -EFAULT;
        if (memcpy_from_msg(iph, msg, length))
                goto error_free;

        iphlen = iph->ihl * 4;

        /*
         * We don't want to modify the ip header, but we do need to
         * be sure that it won't cause problems later along the network
         * stack.  Specifically we want to make sure that iph->ihl is a
         * sane value.  If ihl points beyond the length of the buffer passed
         * in, reject the frame as invalid
         */
        err = -EINVAL;
        if (iphlen > length)
                goto error_free;

        if (iphlen >= sizeof(*iph)) {
                if (!iph->saddr)
                        iph->saddr = fl4->saddr;
                iph->check   = 0;
                iph->tot_len = htons(length);
                if (!iph->id)
                        ip_select_ident(net, skb, NULL);

                iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
                skb->transport_header += iphlen;
                if (iph->protocol == IPPROTO_ICMP &&
                    length >= iphlen + sizeof(struct icmphdr))
                        icmp_out_count(net, ((struct icmphdr *)
                                skb_transport_header(skb))->type);
        }

        err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
                      net, sk, skb, NULL, rt->dst.dev,
                      dst_output);
        if (err > 0)
                err = net_xmit_errno(err);
        if (err)
                goto error;
out:
        return 0;

error_free:
        kfree_skb(skb);
error:
        IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        if (err == -ENOBUFS && !inet_test_bit(RECVERR, sk))
                err = 0;
        return err;
}
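/*
 * Userspace counterpart of raw_send_hdrinc() above: with IP_HDRINCL the
 * caller supplies the full IP header, and as the code shows, saddr, id,
 * tot_len and the header checksum are still filled in by the kernel when
 * left as zero. A minimal sketch; the fd is assumed to be an AF_INET
 * SOCK_RAW socket and the payload after the header is elided.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <string.h>

static void send_hand_built_header(int fd, struct sockaddr_in *dst)
{
        unsigned char pkt[sizeof(struct iphdr)];
        struct iphdr *iph = (struct iphdr *)pkt;
        int on = 1;

        setsockopt(fd, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));

        memset(pkt, 0, sizeof(pkt));
        iph->version = 4;
        iph->ihl = 5;
        iph->ttl = 64;
        iph->protocol = IPPROTO_RAW;
        iph->daddr = dst->sin_addr.s_addr;
        /* saddr, id and check stay 0: raw_send_hdrinc() fixes them up */

        sendto(fd, pkt, sizeof(pkt), 0, (struct sockaddr *)dst, sizeof(*dst));
}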
static int raw_probe_proto_opt(struct raw_frag_vec *rfv, struct flowi4 *fl4)
{
        int err;

        if (fl4->flowi4_proto != IPPROTO_ICMP)
                return 0;

        /* We only need the first two bytes. */
        rfv->hlen = 2;

        err = memcpy_from_msg(rfv->hdr.c, rfv->msg, rfv->hlen);
        if (err)
                return err;

        fl4->fl4_icmp_type = rfv->hdr.icmph.type;
        fl4->fl4_icmp_code = rfv->hdr.icmph.code;

        return 0;
}

static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
                       struct sk_buff *skb)
{
        struct raw_frag_vec *rfv = from;

        if (offset < rfv->hlen) {
                int copy = min(rfv->hlen - offset, len);

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        memcpy(to, rfv->hdr.c + offset, copy);
                else
                        skb->csum = csum_block_add(
                                skb->csum,
                                csum_partial_copy_nocheck(rfv->hdr.c + offset,
                                                          to, copy),
                                odd);

                odd = 0;
                offset += copy;
                to += copy;
                len -= copy;

                if (!len)
                        return 0;
        }

        offset -= rfv->hlen;

        return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}

static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ipcm_cookie ipc;
        struct rtable *rt = NULL;
        struct flowi4 fl4;
        u8 scope;
        int free = 0;
        __be32 daddr;
        __be32 saddr;
        int uc_index, err;
        struct ip_options_data opt_copy;
        struct raw_frag_vec rfv;
        int hdrincl;

        err = -EMSGSIZE;
        if (len > 0xFFFF)
                goto out;

        hdrincl = inet_test_bit(HDRINCL, sk);

        /*
         *	Check the flags.
         */

        err = -EOPNOTSUPP;
        if (msg->msg_flags & MSG_OOB)	/* Mirror BSD error message */
                goto out;               /* compatibility */

        /*
         *	Get and verify the address.
         */

        if (msg->msg_namelen) {
                DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);

                err = -EINVAL;
                if (msg->msg_namelen < sizeof(*usin))
                        goto out;
                if (usin->sin_family != AF_INET) {
                        pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n",
                                     __func__, current->comm);
                        err = -EAFNOSUPPORT;
                        if (usin->sin_family)
                                goto out;
                }
                daddr = usin->sin_addr.s_addr;
                /* ANK: I did not forget to get protocol from port field.
                 * I just do not know, who uses this weirdness.
                 * IP_HDRINCL is much more convenient.
                 */
        } else {
                err = -EDESTADDRREQ;
                if (sk->sk_state != TCP_ESTABLISHED)
                        goto out;
                daddr = inet->inet_daddr;
        }

        ipcm_init_sk(&ipc, inet);
        /* Keep backward compat */
        if (hdrincl)
                ipc.protocol = IPPROTO_RAW;

        if (msg->msg_controllen) {
                err = ip_cmsg_send(sk, msg, &ipc, false);
                if (unlikely(err)) {
                        kfree(ipc.opt);
                        goto out;
                }
                if (ipc.opt)
                        free = 1;
        }

        saddr = ipc.addr;
        ipc.addr = daddr;

        if (!ipc.opt) {
                struct ip_options_rcu *inet_opt;

                rcu_read_lock();
                inet_opt = rcu_dereference(inet->inet_opt);
                if (inet_opt) {
                        memcpy(&opt_copy, inet_opt,
                               sizeof(*inet_opt) + inet_opt->opt.optlen);
                        ipc.opt = &opt_copy.opt;
                }
                rcu_read_unlock();
        }

        if (ipc.opt) {
                err = -EINVAL;
                /* Linux does not mangle headers on raw sockets,
                 * so that IP options + IP_HDRINCL is non-sense.
                 */
                if (hdrincl)
                        goto done;
                if (ipc.opt->opt.srr) {
                        if (!daddr)
                                goto done;
                        daddr = ipc.opt->opt.faddr;
                }
        }

        scope = ip_sendmsg_scope(inet, &ipc, msg);

        uc_index = READ_ONCE(inet->uc_index);
        if (ipv4_is_multicast(daddr)) {
                if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
                        ipc.oif = READ_ONCE(inet->mc_index);
                if (!saddr)
                        saddr = READ_ONCE(inet->mc_addr);
        } else if (!ipc.oif) {
                ipc.oif = uc_index;
        } else if (ipv4_is_lbcast(daddr) && uc_index) {
                /* oif is set, packet is to local broadcast
                 * and uc_index is set. oif is most likely set
                 * by sk_bound_dev_if. If uc_index != oif check if the
                 * oif is an L3 master and uc_index is an L3 slave.
                 * If so, we want to allow the send using the uc_index.
                 */
                if (ipc.oif != uc_index &&
                    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
                                                              uc_index)) {
                        ipc.oif = uc_index;
                }
        }

        flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark,
                           ipc.tos & INET_DSCP_MASK, scope,
                           hdrincl ?
                           ipc.protocol : sk->sk_protocol,
                           inet_sk_flowi_flags(sk) |
                            (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
                           daddr, saddr, 0, 0, sk_uid(sk));

        fl4.fl4_icmp_type = 0;
        fl4.fl4_icmp_code = 0;

        if (!hdrincl) {
                rfv.msg = msg;
                rfv.hlen = 0;

                err = raw_probe_proto_opt(&rfv, &fl4);
                if (err)
                        goto done;
        }

        security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                rt = NULL;
                goto done;
        }

        err = -EACCES;
        if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
                goto done;

        if (msg->msg_flags & MSG_CONFIRM)
                goto do_confirm;
back_from_confirm:

        if (hdrincl)
                err = raw_send_hdrinc(sk, &fl4, msg, len,
                                      &rt, msg->msg_flags, &ipc.sockc);

        else {
                if (!ipc.addr)
                        ipc.addr = fl4.daddr;
                lock_sock(sk);
                err = ip_append_data(sk, &fl4, raw_getfrag,
                                     &rfv, len, 0,
                                     &ipc, &rt, msg->msg_flags);
                if (err)
                        ip_flush_pending_frames(sk);
                else if (!(msg->msg_flags & MSG_MORE)) {
                        err = ip_push_pending_frames(sk, &fl4);
                        if (err == -ENOBUFS && !inet_test_bit(RECVERR, sk))
                                err = 0;
                }
                release_sock(sk);
        }
done:
        if (free)
                kfree(ipc.opt);
        ip_rt_put(rt);

out:
        if (err < 0)
                return err;
        return len;

do_confirm:
        if (msg->msg_flags & MSG_PROBE)
                dst_confirm_neigh(&rt->dst, &fl4.daddr);
        if (!(msg->msg_flags & MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
        goto done;
}

static void raw_close(struct sock *sk, long timeout)
{
        /*
         * Raw sockets may have direct kernel references. Kill them.
         */
        ip_ra_control(sk, 0, NULL);

        sk_common_release(sk);
}

static void raw_destroy(struct sock *sk)
{
        lock_sock(sk);
        ip_flush_pending_frames(sk);
        release_sock(sk);
}

/* This gets rid of all the nasties in af_inet. -DaveM */
static int raw_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
        struct net *net = sock_net(sk);
        u32 tb_id = RT_TABLE_LOCAL;
        int ret = -EINVAL;
        int chk_addr_ret;

        lock_sock(sk);
        if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
                goto out;

        if (sk->sk_bound_dev_if)
                tb_id = l3mdev_fib_table_by_index(net,
                                                  sk->sk_bound_dev_if) ? : tb_id;

        chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);

        ret = -EADDRNOTAVAIL;
        if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
                                         chk_addr_ret))
                goto out;

        inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
        if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
                inet->inet_saddr = 0;  /* Use device */
        sk_dst_reset(sk);
        ret = 0;
out:
        release_sock(sk);
        return ret;
}

/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                       int flags, int *addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        size_t copied = 0;
        int err = -EOPNOTSUPP;
        DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
        struct sk_buff *skb;

        if (flags & MSG_OOB)
                goto out;

        if (flags & MSG_ERRQUEUE) {
                err = ip_recv_error(sk, msg, len, addr_len);
                goto out;
        }

        skb = skb_recv_datagram(sk, flags, &err);
        if (!skb)
                goto out;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        err = skb_copy_datagram_msg(skb, 0, msg, copied);
        if (err)
                goto done;

        sock_recv_cmsgs(msg, sk, skb);

        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                sin->sin_port = 0;
                memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
                *addr_len = sizeof(*sin);
        }
        if (inet_cmsg_flags(inet))
                ip_cmsg_recv(msg, skb);
        if (flags & MSG_TRUNC)
                copied = skb->len;
done:
        skb_free_datagram(sk, skb);
out:
        if (err)
                return err;
        return copied;
}

static int raw_sk_init(struct sock *sk)
{
        struct raw_sock *rp = raw_sk(sk);

        sk->sk_drop_counters = &rp->drop_counters;
        if (inet_sk(sk)->inet_num == IPPROTO_ICMP)
                memset(&rp->filter, 0, sizeof(rp->filter));
        return 0;
}

static int raw_seticmpfilter(struct sock *sk, sockptr_t optval, int optlen)
{
        if (optlen > sizeof(struct icmp_filter))
                optlen = sizeof(struct icmp_filter);
        if (copy_from_sockptr(&raw_sk(sk)->filter, optval, optlen))
                return -EFAULT;
        return 0;
}

static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
{
        int len, ret = -EFAULT;

        if (get_user(len, optlen))
                goto out;
        ret = -EINVAL;
        if (len < 0)
                goto out;
        if (len > sizeof(struct icmp_filter))
                len = sizeof(struct icmp_filter);
        ret = -EFAULT;
        if (put_user(len, optlen) ||
            copy_to_user(optval, &raw_sk(sk)->filter, len))
                goto out;
        ret = 0;
out:
        return ret;
}

static int do_raw_setsockopt(struct sock *sk, int optname,
                             sockptr_t optval, unsigned int optlen)
{
        if (optname == ICMP_FILTER) {
                if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
                        return -EOPNOTSUPP;
                else
                        return raw_seticmpfilter(sk, optval, optlen);
        }
        return -ENOPROTOOPT;
}

static int raw_setsockopt(struct sock *sk, int level, int optname,
                          sockptr_t optval, unsigned int optlen)
{
        if (level != SOL_RAW)
                return ip_setsockopt(sk, level, optname, optval, optlen);
        return do_raw_setsockopt(sk, optname, optval, optlen);
}

static int do_raw_getsockopt(struct sock *sk, int optname,
                             char __user *optval, int __user *optlen)
{
        if (optname == ICMP_FILTER) {
                if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
                        return -EOPNOTSUPP;
                else
                        return raw_geticmpfilter(sk, optval, optlen);
        }
        return -ENOPROTOOPT;
}

static int raw_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        if (level != SOL_RAW)
                return ip_getsockopt(sk, level, optname, optval, optlen);
        return do_raw_getsockopt(sk, optname, optval, optlen);
}

static int raw_ioctl(struct sock *sk, int cmd, int *karg)
{
        switch (cmd) {
        case SIOCOUTQ: {
                *karg = sk_wmem_alloc_get(sk);
                return 0;
        }
        case SIOCINQ: {
                struct sk_buff *skb;

                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
                        *karg = skb->len;
                else
                        *karg = 0;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return 0;
        }
        default:
#ifdef CONFIG_IP_MROUTE
                return ipmr_ioctl(sk, cmd, karg);
#else
                return -ENOIOCTLCMD;
#endif
        }
}

#ifdef CONFIG_COMPAT
static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case SIOCOUTQ:
        case SIOCINQ:
                return -ENOIOCTLCMD;
        default:
#ifdef CONFIG_IP_MROUTE
                return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
                return -ENOIOCTLCMD;
#endif
        }
}
#endif

int raw_abort(struct sock *sk, int err)
{
        lock_sock(sk);

        sk->sk_err = err;
        sk_error_report(sk);
        __udp_disconnect(sk, 0);

        release_sock(sk);

        return 0;
}
EXPORT_SYMBOL_GPL(raw_abort);
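/*
 * Userspace view of raw_ioctl() above: SIOCINQ reports the length of the
 * next queued datagram, SIOCOUTQ the bytes still charged to the send
 * queue. A minimal sketch; error handling is elided.
 */
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void report_queue_depths(int fd, int *inq, int *outq)
{
        ioctl(fd, SIOCINQ, inq);        /* length of next pending datagram */
        ioctl(fd, SIOCOUTQ, outq);      /* unsent bytes in the send queue */
}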
struct proto raw_prot = {
        .name              = "RAW",
        .owner             = THIS_MODULE,
        .close             = raw_close,
        .destroy           = raw_destroy,
        .connect           = ip4_datagram_connect,
        .disconnect        = __udp_disconnect,
        .ioctl             = raw_ioctl,
        .init              = raw_sk_init,
        .setsockopt        = raw_setsockopt,
        .getsockopt        = raw_getsockopt,
        .sendmsg           = raw_sendmsg,
        .recvmsg           = raw_recvmsg,
        .bind              = raw_bind,
        .backlog_rcv       = raw_rcv_skb,
        .release_cb        = ip4_datagram_release_cb,
        .hash              = raw_hash_sk,
        .unhash            = raw_unhash_sk,
        .obj_size          = sizeof(struct raw_sock),
        .useroffset        = offsetof(struct raw_sock, filter),
        .usersize          = sizeof_field(struct raw_sock, filter),
        .h.raw_hash        = &raw_v4_hashinfo,
#ifdef CONFIG_COMPAT
        .compat_ioctl      = compat_raw_ioctl,
#endif
        .diag_destroy      = raw_abort,
};

#ifdef CONFIG_PROC_FS
static struct sock *raw_get_first(struct seq_file *seq, int bucket)
{
        struct raw_hashinfo *h = pde_data(file_inode(seq->file));
        struct raw_iter_state *state = raw_seq_private(seq);
        struct hlist_head *hlist;
        struct sock *sk;

        for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
                        ++state->bucket) {
                hlist = &h->ht[state->bucket];
                sk_for_each(sk, hlist) {
                        if (sock_net(sk) == seq_file_net(seq))
                                return sk;
                }
        }
        return NULL;
}

static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
{
        struct raw_iter_state *state = raw_seq_private(seq);

        do {
                sk = sk_next(sk);
        } while (sk && sock_net(sk) != seq_file_net(seq));

        if (!sk)
                return raw_get_first(seq, state->bucket + 1);
        return sk;
}

static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
{
        struct sock *sk = raw_get_first(seq, 0);

        if (sk)
                while (pos && (sk = raw_get_next(seq, sk)) != NULL)
                        --pos;
        return pos ? NULL : sk;
}

void *raw_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(&h->lock)
{
        struct raw_hashinfo *h = pde_data(file_inode(seq->file));

        spin_lock(&h->lock);

        return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL_GPL(raw_seq_start);

void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *sk;

        if (v == SEQ_START_TOKEN)
                sk = raw_get_first(seq, 0);
        else
                sk = raw_get_next(seq, v);
        ++*pos;
        return sk;
}
EXPORT_SYMBOL_GPL(raw_seq_next);

void raw_seq_stop(struct seq_file *seq, void *v)
        __releases(&h->lock)
{
        struct raw_hashinfo *h = pde_data(file_inode(seq->file));

        spin_unlock(&h->lock);
}
EXPORT_SYMBOL_GPL(raw_seq_stop);

static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
        struct inet_sock *inet = inet_sk(sp);
        __be32 dest = inet->inet_daddr,
               src = inet->inet_rcv_saddr;
        __u16 destp = 0,
              srcp  = inet->inet_num;

        seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
                i, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
                0, 0L, 0,
                from_kuid_munged(seq_user_ns(seq), sk_uid(sp)),
                0, sock_i_ino(sp),
                refcount_read(&sp->sk_refcnt), sp, sk_drops_read(sp));
}

static int raw_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "  sl  local_address rem_address   st tx_queue "
                                "rx_queue tr tm->when retrnsmt   uid  timeout "
                                "inode ref pointer drops\n");
        else
                raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
        return 0;
}

static const struct seq_operations raw_seq_ops = {
        .start = raw_seq_start,
        .next  = raw_seq_next,
        .stop  = raw_seq_stop,
        .show  = raw_seq_show,
};

static __net_init int raw_init_net(struct net *net)
{
        if (!proc_create_net_data("raw", 0444, net->proc_net, &raw_seq_ops,
                        sizeof(struct raw_iter_state), &raw_v4_hashinfo))
                return -ENOMEM;

        return 0;
}

static __net_exit void raw_exit_net(struct net *net)
{
        remove_proc_entry("raw", net->proc_net);
}

static __net_initdata struct pernet_operations raw_net_ops = {
        .init = raw_init_net,
        .exit = raw_exit_net,
};

int __init raw_proc_init(void)
{
        return register_pernet_subsys(&raw_net_ops);
}

void __init raw_proc_exit(void)
{
        unregister_pernet_subsys(&raw_net_ops);
}
#endif /* CONFIG_PROC_FS */
static void raw_sysctl_init_net(struct net *net)
{
#ifdef CONFIG_NET_L3_MASTER_DEV
        net->ipv4.sysctl_raw_l3mdev_accept = 1;
#endif
}

static int __net_init raw_sysctl_init(struct net *net)
{
        raw_sysctl_init_net(net);
        return 0;
}

static struct pernet_operations __net_initdata raw_sysctl_ops = {
        .init   = raw_sysctl_init,
};

void __init raw_init(void)
{
        raw_sysctl_init_net(&init_net);

        if (register_pernet_subsys(&raw_sysctl_ops))
                panic("RAW: failed to init sysctl parameters.\n");
}
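/*
 * End-to-end userspace sketch of the send path implemented above: an ICMP
 * raw socket lets the kernel build the IP header, but the ICMP message,
 * including its checksum, is the caller's job. Assumes CAP_NET_RAW (or a
 * matching ping_group_range for SOCK_DGRAM ICMP); error handling is elided.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <string.h>

static unsigned short icmp_cksum(const unsigned short *p, int len)
{
        unsigned long sum = 0;

        for (; len > 1; len -= 2)
                sum += *p++;
        if (len == 1)
                sum += *(const unsigned char *)p;
        sum = (sum >> 16) + (sum & 0xffff);
        sum += (sum >> 16);
        return (unsigned short)~sum;
}

static void send_echo_request(struct sockaddr_in *dst)
{
        int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
        struct icmphdr icmp;

        memset(&icmp, 0, sizeof(icmp));
        icmp.type = ICMP_ECHO;
        icmp.un.echo.id = htons(1234);
        icmp.un.echo.sequence = htons(1);
        icmp.checksum = icmp_cksum((unsigned short *)&icmp, sizeof(icmp));

        sendto(fd, &icmp, sizeof(icmp), 0,
               (struct sockaddr *)dst, sizeof(*dst));
}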
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_trace.h>

DEFINE_BPF_STORAGE_CACHE(inode_cache);

static struct bpf_local_storage __rcu **
inode_storage_ptr(void *owner)
{
        struct inode *inode = owner;
        struct bpf_storage_blob *bsb;

        bsb = bpf_inode(inode);
        if (!bsb)
                return NULL;
        return &bsb->storage;
}

static struct bpf_local_storage_data *inode_storage_lookup(struct inode *inode,
                                                           struct bpf_map *map,
                                                           bool cacheit_lockit)
{
        struct bpf_local_storage *inode_storage;
        struct bpf_local_storage_map *smap;
        struct bpf_storage_blob *bsb;

        bsb = bpf_inode(inode);
        if (!bsb)
                return NULL;

        inode_storage =
                rcu_dereference_check(bsb->storage, bpf_rcu_lock_held());
        if (!inode_storage)
                return NULL;

        smap = (struct bpf_local_storage_map *)map;
        return bpf_local_storage_lookup(inode_storage, smap, cacheit_lockit);
}

void bpf_inode_storage_free(struct inode *inode)
{
        struct bpf_local_storage *local_storage;
        struct bpf_storage_blob *bsb;

        bsb = bpf_inode(inode);
        if (!bsb)
                return;

        rcu_read_lock_dont_migrate();

        local_storage = rcu_dereference(bsb->storage);
        if (!local_storage)
                goto out;

        bpf_local_storage_destroy(local_storage);
out:
        rcu_read_unlock_migrate();
}
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_local_storage_data *sdata;

        CLASS(fd_raw, f)(*(int *)key);
        if (fd_empty(f))
                return ERR_PTR(-EBADF);

        sdata = inode_storage_lookup(file_inode(fd_file(f)), map, true);
        return sdata ? sdata->data : NULL;
}

static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
                                             void *value, u64 map_flags)
{
        struct bpf_local_storage_data *sdata;

        CLASS(fd_raw, f)(*(int *)key);
        if (fd_empty(f))
                return -EBADF;
        if (!inode_storage_ptr(file_inode(fd_file(f))))
                return -EBADF;

        sdata = bpf_local_storage_update(file_inode(fd_file(f)),
                                         (struct bpf_local_storage_map *)map,
                                         value, map_flags, false, GFP_ATOMIC);
        return PTR_ERR_OR_ZERO(sdata);
}

static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
{
        struct bpf_local_storage_data *sdata;

        sdata = inode_storage_lookup(inode, map, false);
        if (!sdata)
                return -ENOENT;

        bpf_selem_unlink(SELEM(sdata), false);

        return 0;
}

static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
{
        CLASS(fd_raw, f)(*(int *)key);
        if (fd_empty(f))
                return -EBADF;
        return inode_storage_delete(file_inode(fd_file(f)), map);
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
           void *, value, u64, flags, gfp_t, gfp_flags)
{
        struct bpf_local_storage_data *sdata;

        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
                return (unsigned long)NULL;

        /* explicitly check that the inode_storage_ptr is not
         * NULL as inode_storage_lookup returns NULL in this case and
         * bpf_local_storage_update expects the owner to have a
         * valid storage pointer.
         */
        if (!inode || !inode_storage_ptr(inode))
                return (unsigned long)NULL;

        sdata = inode_storage_lookup(inode, map, true);
        if (sdata)
                return (unsigned long)sdata->data;

        /* This helper must only called from where the inode is guaranteed
         * to have a refcount and cannot be freed.
         */
        if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
                sdata = bpf_local_storage_update(
                        inode, (struct bpf_local_storage_map *)map, value,
                        BPF_NOEXIST, false, gfp_flags);
                return IS_ERR(sdata) ? (unsigned long)NULL :
                                       (unsigned long)sdata->data;
        }

        return (unsigned long)NULL;
}

BPF_CALL_2(bpf_inode_storage_delete, struct bpf_map *, map, struct inode *, inode)
{
        WARN_ON_ONCE(!bpf_rcu_lock_held());
        if (!inode)
                return -EINVAL;

        /* This helper must only called from where the inode is guaranteed
         * to have a refcount and cannot be freed.
         */
        return inode_storage_delete(inode, map);
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
                                void *next_key)
{
        return -ENOTSUPP;
}

static struct bpf_map *inode_storage_map_alloc(union bpf_attr *attr)
{
        return bpf_local_storage_map_alloc(attr, &inode_cache, false);
}

static void inode_storage_map_free(struct bpf_map *map)
{
        bpf_local_storage_map_free(map, &inode_cache, NULL);
}

const struct bpf_map_ops inode_storage_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc_check = bpf_local_storage_map_alloc_check,
        .map_alloc = inode_storage_map_alloc,
        .map_free = inode_storage_map_free,
        .map_get_next_key = notsupp_get_next_key,
        .map_lookup_elem = bpf_fd_inode_storage_lookup_elem,
        .map_update_elem = bpf_fd_inode_storage_update_elem,
        .map_delete_elem = bpf_fd_inode_storage_delete_elem,
        .map_check_btf = bpf_local_storage_map_check_btf,
        .map_mem_usage = bpf_local_storage_map_mem_usage,
        .map_btf_id = &bpf_local_storage_map_btf_id[0],
        .map_owner_storage_ptr = inode_storage_ptr,
};

BTF_ID_LIST_SINGLE(bpf_inode_storage_btf_ids, struct, inode)

const struct bpf_func_proto bpf_inode_storage_get_proto = {
        .func           = bpf_inode_storage_get,
        .gpl_only       = false,
        .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_BTF_ID_OR_NULL,
        .arg2_btf_id    = &bpf_inode_storage_btf_ids[0],
        .arg3_type      = ARG_PTR_TO_MAP_VALUE_OR_NULL,
        .arg4_type      = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_inode_storage_delete_proto = {
        .func           = bpf_inode_storage_delete,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
        .arg2_type      = ARG_PTR_TO_BTF_ID_OR_NULL,
        .arg2_btf_id    = &bpf_inode_storage_btf_ids[0],
};
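/*
 * BPF-side sketch of using the helpers defined above from an LSM program,
 * in the style of the kernel's BPF selftests. The map name, value type and
 * attach point are illustrative assumptions, not taken from this file.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct inode_note {
        __u64 open_count;
};

struct {
        __uint(type, BPF_MAP_TYPE_INODE_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct inode_note);
} inode_notes SEC(".maps");

SEC("lsm/file_open")
int BPF_PROG(count_opens, struct file *file)
{
        struct inode_note *note;

        /* Create storage for this inode on first use, then bump the count. */
        note = bpf_inode_storage_get(&inode_notes, file->f_inode, 0,
                                     BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (note)
                __sync_fetch_and_add(&note->open_count, 1);
        return 0;
}

char _license[] SEC("license") = "GPL";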
5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (C) 2018-2025 Intel Corporation
 *
 * Transmit and frame generation functions.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <linux/unaligned.h>
#include <net/fq_impl.h>
#include <net/sock.h>
#include <net/gso.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "wme.h"
#include "rate.h"

/* misc utils */

static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
				 struct sk_buff *skb, int group_addr,
				 int next_frag_len)
{
	int rate, mrate, erp, dur, i;
	struct ieee80211_rate *txrate;
	struct ieee80211_local *local = tx->local;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* assume HW handles this */
	if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
		return 0;

	/* uh huh? */
	if (WARN_ON_ONCE(tx->rate.idx < 0))
		return 0;

	if (info->band >= NUM_NL80211_BANDS)
		return 0;

	sband = local->hw.wiphy->bands[info->band];
	txrate = &sband->bitrates[tx->rate.idx];

	erp = txrate->flags & IEEE80211_RATE_ERP_G;

	/* device is expected to do this */
	if (sband->band == NL80211_BAND_S1GHZ)
		return 0;

	/*
	 * data and mgmt (except PS Poll):
	 * - during CFP: 32768
	 * - during contention period:
	 *   if addr1 is group address: 0
	 *   if more fragments = 0 and addr1 is individual address: time to
	 *      transmit one ACK plus SIFS
	 *   if more fragments = 1 and addr1 is individual address: time to
	 *      transmit next fragment plus 2 x ACK plus 3 x SIFS
	 *
	 * IEEE 802.11, 9.6:
	 * - control response frame (CTS or ACK) shall be transmitted using the
	 *   same rate as the immediately previous frame in the frame exchange
	 *   sequence, if this rate belongs to the PHY mandatory rates, or else
	 *   at the highest possible rate belonging to the PHY rates in the
	 *   BSSBasicRateSet
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	if (ieee80211_is_ctl(hdr->frame_control)) {
		/* TODO: These control frames are not currently sent by
		 * mac80211, but should they be implemented, this function
		 * needs to be updated to support duration field calculation.
		 *
		 * RTS: time needed to transmit pending data/mgmt frame plus
		 *	one CTS frame plus one ACK frame plus 3 x SIFS
		 * CTS: duration of immediately previous RTS minus time
		 *	required to transmit CTS and its SIFS
		 * ACK: 0 if immediately previous directed data/mgmt had
		 *	more=0, with more=1 duration in ACK frame is duration
		 *	from previous frame minus time needed to transmit ACK
		 *	and its SIFS
		 * PS Poll: BIT(15) | BIT(14) | aid
		 */
		return 0;
	}

	/* data/mgmt */
	if (0 /* FIX: data/mgmt during CFP */)
		return cpu_to_le16(32768);

	if (group_addr) /* Group address as the destination - no ACK */
		return 0;

	/* Individual destination address:
	 * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes)
	 * CTS and ACK frames shall be transmitted using the highest rate in
	 * basic rate set that is less than or equal to the rate of the
	 * immediately previous frame and that is using the same modulation
	 * (CCK or OFDM). If no basic rate set matches with these requirements,
	 * the highest mandatory rate of the PHY that is less than or equal to
	 * the rate of the previous frame is used.
	 * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps
	 */
	rate = -1;
	/* use lowest available if everything fails */
	mrate = sband->bitrates[0].bitrate;
	for (i = 0; i < sband->n_bitrates; i++) {
		struct ieee80211_rate *r = &sband->bitrates[i];
		u32 flag;

		if (r->bitrate > txrate->bitrate)
			break;

		if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
			rate = r->bitrate;

		switch (sband->band) {
		case NL80211_BAND_2GHZ:
		case NL80211_BAND_LC:
			if (tx->sdata->deflink.operating_11g_mode)
				flag = IEEE80211_RATE_MANDATORY_G;
			else
				flag = IEEE80211_RATE_MANDATORY_B;
			break;
		case NL80211_BAND_5GHZ:
		case NL80211_BAND_6GHZ:
			flag = IEEE80211_RATE_MANDATORY_A;
			break;
		default:
			flag = 0;
			WARN_ON(1);
			break;
		}

		if (r->flags & flag)
			mrate = r->bitrate;
	}
	if (rate == -1) {
		/* No matching basic rate found; use highest suitable mandatory
		 * PHY rate */
		rate = mrate;
	}

	/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
	if (ieee80211_is_data_qos(hdr->frame_control) &&
	    *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
		dur = 0;
	else
		/* Time needed to transmit ACK
		 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
		 * to closest integer */
		dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble);

	if (next_frag_len) {
		/* Frame is fragmented: duration increases with time needed to
		 * transmit next fragment plus ACK and 2 x SIFS. */
		dur *= 2; /* ACK + SIFS */
		/* next fragment */
		dur += ieee80211_frame_duration(sband->band, next_frag_len,
				txrate->bitrate, erp,
				tx->sdata->vif.bss_conf.use_short_preamble);
	}

	return cpu_to_le16(dur);
}
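/*
 * Illustrative sketch (not part of mac80211): the basic-rate scan above
 * picks the highest rate from the BSS basic rate set that does not exceed
 * the TX rate, falling back to the highest suitable mandatory rate. A
 * minimal userspace model of that selection, with a hypothetical sorted
 * 802.11g rate table and basic rate set:
 */
#if 0 /* example only, not kernel code */
#include <stdio.h>

int main(void)
{
	/* bitrates in units of 100 kbps, as in struct ieee80211_rate */
	static const int bitrates[] = { 10, 20, 55, 60, 90, 110,
					120, 180, 240, 360, 480, 540 };
	/* hypothetical basic rate set: 1, 2, 5.5 and 11 Mbps */
	unsigned int basic_rates = (1u << 0) | (1u << 1) | (1u << 2) | (1u << 5);
	int txrate = 240;	/* transmitting at 24 Mbps */
	int rate = -1;
	unsigned int i;

	for (i = 0; i < sizeof(bitrates) / sizeof(bitrates[0]); i++) {
		if (bitrates[i] > txrate)
			break;
		if (basic_rates & (1u << i))
			rate = bitrates[i];
	}
	/* prints 110: the ACK is expected at 11 Mbps */
	printf("response rate: %d00 kbps\n", rate);
	return 0;
}
#endif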
/* tx handlers */

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
{
	struct ieee80211_local *local = tx->local;
	struct ieee80211_if_managed *ifmgd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	/* driver doesn't support power save */
	if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
		return TX_CONTINUE;

	/* hardware does dynamic power save */
	if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
		return TX_CONTINUE;

	/* dynamic power save disabled */
	if (local->hw.conf.dynamic_ps_timeout <= 0)
		return TX_CONTINUE;

	/* we are scanning, don't enable power save */
	if (local->scanning)
		return TX_CONTINUE;

	if (!local->ps_sdata)
		return TX_CONTINUE;

	/* No point if we're going to suspend */
	if (local->quiescing)
		return TX_CONTINUE;

	/* dynamic ps is supported only in managed mode */
	if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
		return TX_CONTINUE;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
		return TX_CONTINUE;

	ifmgd = &tx->sdata->u.mgd;

	/*
	 * Don't wakeup from power save if u-apsd is enabled, voip ac has
	 * u-apsd enabled and the frame is in voip class. This effectively
	 * means that even if all access categories have u-apsd enabled, in
	 * practise u-apsd is only used with the voip ac. This is a
	 * workaround for the case when received voip class packets do not
	 * have correct qos tag for some reason, due the network or the
	 * peer application.
	 *
	 * Note: ifmgd->uapsd_queues access is racy here. If the value is
	 * changed via debugfs, user needs to reassociate manually to have
	 * everything in sync.
	 */
	if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
	    (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
	    skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
		return TX_CONTINUE;

	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		ieee80211_stop_queues_by_reason(&local->hw,
						IEEE80211_MAX_QUEUE_MAP,
						IEEE80211_QUEUE_STOP_REASON_PS,
						false);
		ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
		wiphy_work_queue(local->hw.wiphy,
				 &local->dynamic_ps_disable_work);
	}

	/* Don't restart the timer if we're not disassociated */
	if (!ifmgd->associated)
		return TX_CONTINUE;

	mod_timer(&local->dynamic_ps_timer, jiffies +
		  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));

	return TX_CONTINUE;
}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	bool assoc = false;

	if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
		return TX_CONTINUE;

	if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
	    test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
	    !ieee80211_is_probe_req(hdr->frame_control) &&
	    !ieee80211_is_any_nullfunc(hdr->frame_control))
		/*
		 * When software scanning only nullfunc frames (to notify
		 * the sleep state to the AP) and probe requests (for the
		 * active scan) are allowed, all other frames should not be
		 * sent and we should not get here, but if we do
		 * nonetheless, drop them to avoid sending them
		 * off-channel. See the link below and
		 * ieee80211_start_scan() for more.
		 *
		 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
		 */
		return TX_DROP;

	if (tx->sdata->vif.type == NL80211_IFTYPE_OCB)
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
		return TX_CONTINUE;

	if (tx->sta)
		assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);

	if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
		if (unlikely(!assoc &&
			     ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
			sdata_info(tx->sdata,
				   "dropped data frame to not associated station %pM\n",
				   hdr->addr1);
#endif
			I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
			return TX_DROP;
		}
	} else if (unlikely(ieee80211_is_data(hdr->frame_control) &&
			    ieee80211_vif_get_num_mcast_if(tx->sdata) == 0)) {
		/*
		 * No associated STAs - no need to send multicast
		 * frames.
		 */
		return TX_DROP;
	}

	return TX_CONTINUE;
}

/* This function is called whenever the AP is about to exceed the maximum limit
 * of buffered frames for power saving STAs. This situation should not really
 * happen often during normal operation, so dropping the oldest buffered packet
 * from each queue should be OK to make some room for new frames. */
static void purge_old_ps_buffers(struct ieee80211_local *local)
{
	int total = 0, purged = 0;
	struct sk_buff *skb;
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *sta;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		struct ps_data *ps;

		if (sdata->vif.type == NL80211_IFTYPE_AP)
			ps = &sdata->u.ap.ps;
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			ps = &sdata->u.mesh.ps;
		else
			continue;

		skb = skb_dequeue(&ps->bc_buf);
		if (skb) {
			purged++;
			ieee80211_free_txskb(&local->hw, skb);
		}
		total += skb_queue_len(&ps->bc_buf);
	}

	/*
	 * Drop one frame from each station from the lowest-priority
	 * AC that has frames at all.
	 */
	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		int ac;

		for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
			skb = skb_dequeue(&sta->ps_tx_buf[ac]);
			total += skb_queue_len(&sta->ps_tx_buf[ac]);
			if (skb) {
				purged++;
				ieee80211_free_txskb(&local->hw, skb);
				break;
			}
		}
	}

	local->total_ps_buffered = total;
	ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
}
static ieee80211_tx_result
ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ps_data *ps;

	/*
	 * broadcast/multicast frame
	 *
	 * If any of the associated/peer stations is in power save mode,
	 * the frame is buffered to be sent after DTIM beacon frame.
	 * This is done either by the hardware or us.
	 */

	/* powersaving STAs currently only in AP/VLAN/mesh mode */
	if (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
	    tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
		if (!tx->sdata->bss)
			return TX_CONTINUE;

		ps = &tx->sdata->bss->ps;
	} else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) {
		ps = &tx->sdata->u.mesh.ps;
	} else {
		return TX_CONTINUE;
	}

	/* no buffering for ordered frames */
	if (ieee80211_has_order(hdr->frame_control))
		return TX_CONTINUE;

	if (ieee80211_is_probe_req(hdr->frame_control))
		return TX_CONTINUE;

	if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
		info->hw_queue = tx->sdata->vif.cab_queue;

	/* no stations in PS mode and no buffered packets */
	if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
		return TX_CONTINUE;

	info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;

	/* device releases frame after DTIM beacon */
	if (!ieee80211_hw_check(&tx->local->hw, HOST_BROADCAST_PS_BUFFERING))
		return TX_CONTINUE;

	/* buffered in mac80211 */
	if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
		purge_old_ps_buffers(tx->local);

	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
		ps_dbg(tx->sdata,
		       "BC TX buffer full - dropping the oldest frame\n");
		ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
	} else
		tx->local->total_ps_buffered++;

	skb_queue_tail(&ps->bc_buf, tx->skb);

	return TX_QUEUED;
}

static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
			     struct sk_buff *skb)
{
	if (!ieee80211_is_mgmt(fc))
		return 0;

	if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
		return 0;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return 0;

	return 1;
}

static ieee80211_tx_result
ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
{
	struct sta_info *sta = tx->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_local *local = tx->local;

	if (unlikely(!sta))
		return TX_CONTINUE;

	if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
		      test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
		      test_sta_flag(sta, WLAN_STA_PS_DELIVER)) &&
		     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
		int ac = skb_get_queue_mapping(tx->skb);

		if (ieee80211_is_mgmt(hdr->frame_control) &&
		    !ieee80211_is_bufferable_mmpdu(tx->skb)) {
			info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
			return TX_CONTINUE;
		}

		ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
		       sta->sta.addr, sta->sta.aid, ac);
		if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
			purge_old_ps_buffers(tx->local);

		/* sync with ieee80211_sta_ps_deliver_wakeup */
		spin_lock(&sta->ps_lock);
		/*
		 * STA woke up the meantime and all the frames on ps_tx_buf have
		 * been queued to pending queue. No reordering can happen, go
		 * ahead and Tx the packet.
		 */
		if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DRIVER) &&
		    !test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
			spin_unlock(&sta->ps_lock);
			return TX_CONTINUE;
		}

		if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
			struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);

			ps_dbg(tx->sdata,
			       "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
			       sta->sta.addr, ac);
			ieee80211_free_txskb(&local->hw, old);
		} else
			tx->local->total_ps_buffered++;

		info->control.jiffies = jiffies;
		info->control.vif = &tx->sdata->vif;
		info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
		info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
		skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
		spin_unlock(&sta->ps_lock);

		if (!timer_pending(&local->sta_cleanup))
			mod_timer(&local->sta_cleanup,
				  round_jiffies(jiffies +
						STA_INFO_CLEANUP_INTERVAL));

		/*
		 * We queued up some frames, so the TIM bit might
		 * need to be set, recalculate it.
		 */
		sta_info_recalc_tim(sta);

		return TX_QUEUED;
	} else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
		ps_dbg(tx->sdata,
		       "STA %pM in PS mode, but polling/in SP -> send frame\n",
		       sta->sta.addr);
	}

	return TX_CONTINUE;
}
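/*
 * Illustrative sketch (not part of mac80211): both PS buffering paths above
 * use a bounded queue with drop-oldest overflow - dequeue and free the head,
 * then append the new frame. A toy userspace model of that policy, with a
 * hypothetical MAX_BUF standing in for STA_MAX_TX_BUFFER:
 */
#if 0 /* example only, not kernel code */
#include <stdio.h>

#define MAX_BUF 3	/* stands in for STA_MAX_TX_BUFFER */

int main(void)
{
	int buf[MAX_BUF * 2], head = 0, tail = 0, frame;

	for (frame = 1; frame <= 5; frame++) {
		if (tail - head >= MAX_BUF)
			head++;		/* drop the oldest buffered frame */
		buf[tail++ % (MAX_BUF * 2)] = frame;
	}
	/* frames 3, 4 and 5 remain buffered */
	for (; head < tail; head++)
		printf("buffered: %d\n", buf[head % (MAX_BUF * 2)]);
	return 0;
}
#endif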
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
{
	if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
		return TX_CONTINUE;

	if (tx->flags & IEEE80211_TX_UNICAST)
		return ieee80211_tx_h_unicast_ps_buf(tx);
	else
		return ieee80211_tx_h_multicast_ps_buf(tx);
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

	if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
		if (tx->sdata->control_port_no_encrypt)
			info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
		info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
		info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
	}

	return TX_CONTINUE;
}

static struct ieee80211_key *
ieee80211_select_link_key(struct ieee80211_tx_data *tx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_link_data *link;
	unsigned int link_id;

	link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
	if (link_id == IEEE80211_LINK_UNSPECIFIED) {
		link = &tx->sdata->deflink;
	} else {
		link = rcu_dereference(tx->sdata->link[link_id]);
		if (!link)
			return NULL;
	}

	if (ieee80211_is_group_privacy_action(tx->skb))
		return rcu_dereference(link->default_multicast_key);
	else if (ieee80211_is_mgmt(hdr->frame_control) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 ieee80211_is_robust_mgmt_frame(tx->skb))
		return rcu_dereference(link->default_mgmt_key);
	else if (is_multicast_ether_addr(hdr->addr1))
		return rcu_dereference(link->default_multicast_key);

	return NULL;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
	struct ieee80211_key *key;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;

	if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
		tx->key = NULL;
		return TX_CONTINUE;
	}

	if (tx->sta &&
	    (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
		tx->key = key;
	else if ((key = ieee80211_select_link_key(tx)))
		tx->key = key;
	else if (!is_multicast_ether_addr(hdr->addr1) &&
		 (key = rcu_dereference(tx->sdata->default_unicast_key)))
		tx->key = key;
	else
		tx->key = NULL;

	if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
		if (tx->key && tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
			info->control.hw_key = &tx->key->conf;
		return TX_CONTINUE;
	}

	if (tx->key) {
		bool skip_hw = false;

		/* TODO: add threshold stuff again */
		switch (tx->key->conf.cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			if (!ieee80211_is_data_present(hdr->frame_control))
				tx->key = NULL;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (!ieee80211_is_data_present(hdr->frame_control) &&
			    !ieee80211_use_mfp(hdr->frame_control, tx->sta,
					       tx->skb) &&
			    !ieee80211_is_group_privacy_action(tx->skb))
				tx->key = NULL;
			else
				skip_hw = (tx->key->conf.flags &
					   IEEE80211_KEY_FLAG_SW_MGMT_TX) &&
					ieee80211_is_mgmt(hdr->frame_control);
			break;
		case WLAN_CIPHER_SUITE_AES_CMAC:
		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			if (!ieee80211_is_mgmt(hdr->frame_control))
				tx->key = NULL;
			break;
		}

		if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
			     !ieee80211_is_deauth(hdr->frame_control)) &&
		    tx->skb->protocol != tx->sdata->control_port_protocol)
			return TX_DROP;

		if (!skip_hw && tx->key &&
		    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
			info->control.hw_key = &tx->key->conf;
	} else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
		   test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
		return TX_DROP;
	}

	return TX_CONTINUE;
}
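/*
 * Illustrative sketch (not part of mac80211): the key selection above tries,
 * in order, the station's pairwise key, a per-link default (multicast/mgmt)
 * key, and finally the interface default unicast key. A toy model of that
 * precedence with plain pointers; all names here are hypothetical:
 */
#if 0 /* example only, not kernel code */
#include <stdio.h>
#include <stddef.h>

struct toy_key { const char *label; };

static const struct toy_key *select_key(const struct toy_key *ptk,
					const struct toy_key *link_default,
					const struct toy_key *unicast_default,
					int is_multicast)
{
	if (ptk)
		return ptk;		/* pairwise key always wins */
	if (link_default)
		return link_default;	/* e.g. default multicast/mgmt key */
	if (!is_multicast && unicast_default)
		return unicast_default;
	return NULL;			/* send unprotected */
}

int main(void)
{
	struct toy_key gtk = { "GTK" };
	/* multicast frame, no pairwise key: the group key is chosen */
	const struct toy_key *k = select_key(NULL, &gtk, NULL, 1);

	printf("selected: %s\n", k ? k->label : "none");
	return 0;
}
#endif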
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (void *)tx->skb->data;
	struct ieee80211_supported_band *sband;
	u32 len;
	struct ieee80211_tx_rate_control txrc;
	struct ieee80211_sta_rates *ratetbl = NULL;
	bool encap = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	bool assoc = false;

	memset(&txrc, 0, sizeof(txrc));

	if (info->band < NUM_NL80211_BANDS)
		sband = tx->local->hw.wiphy->bands[info->band];
	else
		return TX_CONTINUE;

	len = min_t(u32, tx->skb->len + FCS_LEN,
		    tx->local->hw.wiphy->frag_threshold);

	/* set up the tx rate control struct we give the RC algo */
	txrc.hw = &tx->local->hw;
	txrc.sband = sband;
	txrc.bss_conf = &tx->sdata->vif.bss_conf;
	txrc.skb = tx->skb;
	txrc.reported_rate.idx = -1;

	if (unlikely(info->control.flags &
		     IEEE80211_TX_CTRL_DONT_USE_RATE_MASK)) {
		txrc.rate_idx_mask = ~0;
	} else {
		txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];

		if (tx->sdata->rc_has_mcs_mask[info->band])
			txrc.rate_idx_mcs_mask =
				tx->sdata->rc_rateidx_mcs_mask[info->band];
	}

	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
		    tx->sdata->vif.type == NL80211_IFTYPE_OCB);

	/* set up RTS protection if desired */
	if (len > tx->local->hw.wiphy->rts_threshold) {
		txrc.rts = true;
	}

	info->control.use_rts = txrc.rts;

	info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot;

	/*
	 * Use short preamble if the BSS can handle it, but not for
	 * management frames unless we know the receiver can handle
	 * that -- the management frame might be to a station that
	 * just wants a probe response.
	 */
	if (tx->sdata->vif.bss_conf.use_short_preamble &&
	    (ieee80211_is_tx_data(tx->skb) ||
	     (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
		txrc.short_preamble = true;

	info->control.short_preamble = txrc.short_preamble;

	/* don't ask rate control when rate already injected via radiotap */
	if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
		return TX_CONTINUE;

	if (tx->sta)
		assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);

	/*
	 * Lets not bother rate control if we're associated and cannot
	 * talk to the sta. This should not happen.
	 */
	if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
		 !rate_usable_index_exists(sband, &tx->sta->sta),
		 "%s: Dropped data frame as no usable bitrate found while "
		 "scanning and associated. Target station: "
		 "%pM on %d GHz band\n",
		 tx->sdata->name,
		 encap ? ((struct ethhdr *)hdr)->h_dest : hdr->addr1,
		 info->band ? 5 : 2))
		return TX_DROP;

	/*
	 * If we're associated with the sta at this point we know we can at
	 * least send the frame at the lowest bit rate.
	 */
	rate_control_get_rate(tx->sdata, tx->sta, &txrc);

	if (tx->sta && !info->control.skip_table)
		ratetbl = rcu_dereference(tx->sta->sta.rates);

	if (unlikely(info->control.rates[0].idx < 0)) {
		if (ratetbl) {
			struct ieee80211_tx_rate rate = {
				.idx = ratetbl->rate[0].idx,
				.flags = ratetbl->rate[0].flags,
				.count = ratetbl->rate[0].count
			};

			if (ratetbl->rate[0].idx < 0)
				return TX_DROP;

			tx->rate = rate;
		} else {
			return TX_DROP;
		}
	} else {
		tx->rate = info->control.rates[0];
	}

	if (txrc.reported_rate.idx < 0) {
		txrc.reported_rate = tx->rate;
		if (tx->sta && ieee80211_is_tx_data(tx->skb))
			tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate;
	} else if (tx->sta)
		tx->sta->deflink.tx_stats.last_rate = txrc.reported_rate;

	if (ratetbl)
		return TX_CONTINUE;

	if (unlikely(!info->control.rates[0].count))
		info->control.rates[0].count = 1;

	if (WARN_ON_ONCE((info->control.rates[0].count > 1) &&
			 (info->flags & IEEE80211_TX_CTL_NO_ACK)))
		info->control.rates[0].count = 1;

	return TX_CONTINUE;
}
static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
{
	u16 *seq = &sta->tid_seq[tid];
	__le16 ret = cpu_to_le16(*seq);

	/* Increase the sequence number. */
	*seq = (*seq + 0x10) & IEEE80211_SCTL_SEQ;

	return ret;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
	int tid;

	/*
	 * Packet injection may want to control the sequence
	 * number, if we have no matching interface then we
	 * neither assign one ourselves nor ask the driver to.
	 */
	if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
		return TX_CONTINUE;

	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
		return TX_CONTINUE;

	if (ieee80211_hdrlen(hdr->frame_control) < 24)
		return TX_CONTINUE;

	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
		return TX_CONTINUE;

	if (info->control.flags & IEEE80211_TX_CTRL_NO_SEQNO)
		return TX_CONTINUE;

	/* SNS11 from 802.11be 10.3.2.14 */
	if (unlikely(is_multicast_ether_addr(hdr->addr1) &&
		     ieee80211_vif_is_mld(info->control.vif) &&
		     info->control.vif->type == NL80211_IFTYPE_AP)) {
		if (info->control.flags & IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX)
			tx->sdata->mld_mcast_seq += 0x10;
		hdr->seq_ctrl = cpu_to_le16(tx->sdata->mld_mcast_seq);
		return TX_CONTINUE;
	}

	/*
	 * Anything but QoS data that has a sequence number field
	 * (is long enough) gets a sequence number from the global
	 * counter. QoS data frames with a multicast destination
	 * also use the global counter (802.11-2012 9.3.2.10).
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1)) {
		/* driver should assign sequence number */
		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
		/* for pure STA mode without beacons, we can do it */
		hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
		tx->sdata->sequence_number += 0x10;
		if (tx->sta)
			tx->sta->deflink.tx_stats.msdu[IEEE80211_NUM_TIDS]++;
		return TX_CONTINUE;
	}

	/*
	 * This should be true for injected/management frames only, for
	 * management frames we have set the IEEE80211_TX_CTL_ASSIGN_SEQ
	 * above since they are not QoS-data frames.
	 */
	if (!tx->sta)
		return TX_CONTINUE;

	/* include per-STA, per-TID sequence counter */
	tid = ieee80211_get_tid(hdr);
	tx->sta->deflink.tx_stats.msdu[tid]++;

	hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);

	return TX_CONTINUE;
}
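/*
 * Illustrative sketch (not part of mac80211): sequence numbers above advance
 * in steps of 0x10 because the 802.11 Sequence Control field keeps the
 * fragment number in its low 4 bits (IEEE80211_SCTL_FRAG == 0x000f) and the
 * sequence number in the upper 12 (IEEE80211_SCTL_SEQ == 0xfff0). A minimal
 * userspace demo of the wraparound and of ORing in a fragment number:
 */
#if 0 /* example only, not kernel code */
#include <stdio.h>
#include <stdint.h>

#define SCTL_FRAG 0x000fu
#define SCTL_SEQ  0xfff0u

int main(void)
{
	uint16_t seq = 0xfff0;	/* highest sequence number, 4095 */

	seq = (seq + 0x10) & SCTL_SEQ;	/* wraps back to 0 */
	printf("next seq field: 0x%04x\n", seq);

	/* third fragment (fragment number 2) of that MSDU */
	printf("seq_ctrl: 0x%04x\n", seq | (2 & SCTL_FRAG));
	return 0;
}
#endif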
static int ieee80211_fragment(struct ieee80211_tx_data *tx,
			      struct sk_buff *skb, int hdrlen,
			      int frag_threshold)
{
	struct ieee80211_local *local = tx->local;
	struct ieee80211_tx_info *info;
	struct sk_buff *tmp;
	int per_fragm = frag_threshold - hdrlen - FCS_LEN;
	int pos = hdrlen + per_fragm;
	int rem = skb->len - hdrlen - per_fragm;

	if (WARN_ON(rem < 0))
		return -EINVAL;

	/* first fragment was already added to queue by caller */

	while (rem) {
		int fraglen = per_fragm;

		if (fraglen > rem)
			fraglen = rem;
		rem -= fraglen;
		tmp = dev_alloc_skb(local->tx_headroom +
				    frag_threshold +
				    IEEE80211_ENCRYPT_HEADROOM +
				    IEEE80211_ENCRYPT_TAILROOM);
		if (!tmp)
			return -ENOMEM;

		__skb_queue_tail(&tx->skbs, tmp);

		skb_reserve(tmp,
			    local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);

		/* copy control information */
		memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));

		info = IEEE80211_SKB_CB(tmp);
		info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
				 IEEE80211_TX_CTL_FIRST_FRAGMENT);

		if (rem)
			info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;

		skb_copy_queue_mapping(tmp, skb);
		tmp->priority = skb->priority;
		tmp->dev = skb->dev;

		/* copy header and data */
		skb_put_data(tmp, skb->data, hdrlen);
		skb_put_data(tmp, skb->data + pos, fraglen);

		pos += fraglen;
	}

	/* adjust first fragment's length */
	skb_trim(skb, hdrlen + per_fragm);
	return 0;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb = tx->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	int frag_threshold = tx->local->hw.wiphy->frag_threshold;
	int hdrlen;
	int fragnum;

	/* no matter what happens, tx->skb moves to tx->skbs */
	__skb_queue_tail(&tx->skbs, skb);
	tx->skb = NULL;

	if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
		return TX_CONTINUE;

	if (ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG))
		return TX_CONTINUE;

	/*
	 * Warn when submitting a fragmented A-MPDU frame and drop it.
	 * This scenario is handled in ieee80211_tx_prepare but extra
	 * caution taken here as fragmented ampdu may cause Tx stop.
	 */
	if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
		return TX_DROP;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	/* internal error, why isn't DONTFRAG set? */
	if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
		return TX_DROP;

	/*
	 * Now fragment the frame. This will allocate all the fragments and
	 * chain them (using skb as the first fragment) to skb->next.
	 * During transmission, we will remove the successfully transmitted
	 * fragments from this list. When the low-level driver rejects one
	 * of the fragments then we will simply pretend to accept the skb
	 * but store it away as pending.
	 */
	if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold))
		return TX_DROP;

	/* update duration/seq/flags of fragments */
	fragnum = 0;

	skb_queue_walk(&tx->skbs, skb) {
		const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);

		hdr = (void *)skb->data;
		info = IEEE80211_SKB_CB(skb);

		if (!skb_queue_is_last(&tx->skbs, skb)) {
			hdr->frame_control |= morefrags;
			/*
			 * No multi-rate retries for fragmented frames, that
			 * would completely throw off the NAV at other STAs.
			 */
			info->control.rates[1].idx = -1;
			info->control.rates[2].idx = -1;
			info->control.rates[3].idx = -1;
			BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
			info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
		} else {
			hdr->frame_control &= ~morefrags;
		}
		hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
		fragnum++;
	}

	return TX_CONTINUE;
}
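/*
 * Illustrative sketch (not part of mac80211): each fragment above carries
 * frag_threshold - hdrlen - FCS_LEN bytes of payload; the first fragment is
 * trimmed to that size and the remainder is split into further fragments.
 * A quick userspace model of the resulting fragment sizes, with hypothetical
 * threshold and frame length:
 */
#if 0 /* example only, not kernel code */
#include <stdio.h>

int main(void)
{
	int frag_threshold = 1000, hdrlen = 24, fcs_len = 4;
	int skb_len = 2500;	/* header + payload, hypothetical */
	int per_fragm = frag_threshold - hdrlen - fcs_len;	/* 972 */
	int rem = skb_len - hdrlen;
	int fragnum = 0;

	while (rem > 0) {
		int fraglen = rem < per_fragm ? rem : per_fragm;

		/* prints fragments of 972, 972 and 532 payload bytes */
		printf("fragment %d: %d header + %d payload bytes\n",
		       fragnum++, hdrlen, fraglen);
		rem -= fraglen;
	}
	return 0;
}
#endif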
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb;
	int ac = -1;

	if (!tx->sta)
		return TX_CONTINUE;

	skb_queue_walk(&tx->skbs, skb) {
		ac = skb_get_queue_mapping(skb);
		tx->sta->deflink.tx_stats.bytes[ac] += skb->len;
	}
	if (ac >= 0)
		tx->sta->deflink.tx_stats.packets[ac]++;

	return TX_CONTINUE;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
{
	if (!tx->key)
		return TX_CONTINUE;

	switch (tx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return ieee80211_crypto_wep_encrypt(tx);
	case WLAN_CIPHER_SUITE_TKIP:
		return ieee80211_crypto_tkip_encrypt(tx);
	case WLAN_CIPHER_SUITE_CCMP:
		return ieee80211_crypto_ccmp_encrypt(
			tx, IEEE80211_CCMP_MIC_LEN);
	case WLAN_CIPHER_SUITE_CCMP_256:
		return ieee80211_crypto_ccmp_encrypt(
			tx, IEEE80211_CCMP_256_MIC_LEN);
	case WLAN_CIPHER_SUITE_AES_CMAC:
		return ieee80211_crypto_aes_cmac_encrypt(
			tx, IEEE80211_CMAC_128_MIC_LEN);
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		return ieee80211_crypto_aes_cmac_encrypt(
			tx, IEEE80211_CMAC_256_MIC_LEN);
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		return ieee80211_crypto_aes_gmac_encrypt(tx);
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		return ieee80211_crypto_gcmp_encrypt(tx);
	}

	return TX_DROP;
}

static ieee80211_tx_result debug_noinline
ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	int next_len;
	bool group_addr;

	skb_queue_walk(&tx->skbs, skb) {
		hdr = (void *)skb->data;
		if (unlikely(ieee80211_is_pspoll(hdr->frame_control)))
			break; /* must not overwrite AID */
		if (!skb_queue_is_last(&tx->skbs, skb)) {
			struct sk_buff *next = skb_queue_next(&tx->skbs, skb);
			next_len = next->len;
		} else
			next_len = 0;
		group_addr = is_multicast_ether_addr(hdr->addr1);

		hdr->duration_id =
			ieee80211_duration(tx, skb, group_addr, next_len);
	}

	return TX_CONTINUE;
}

/* actual transmit path */

static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
				  struct sk_buff *skb,
				  struct ieee80211_tx_info *info,
				  struct tid_ampdu_tx *tid_tx, int tid)
{
	bool queued = false;
	bool reset_agg_timer = false;
	struct sk_buff *purge_skb = NULL;

	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
		reset_agg_timer = true;
	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/*
		 * nothing -- this aggregation session is being started
		 * but that might still fail with the driver
		 */
	} else if (!tx->sta->sta.txq[tid]) {
		spin_lock(&tx->sta->lock);
		/*
		 * Need to re-check now, because we may get here
		 *
		 *  1) in the window during which the setup is actually
		 *     already done, but not marked yet because not all
		 *     packets are spliced over to the driver pending
		 *     queue yet -- if this happened we acquire the lock
		 *     either before or after the splice happens, but
		 *     need to recheck which of these cases happened.
		 *
		 *  2) during session teardown, if the OPERATIONAL bit
		 *     was cleared due to the teardown but the pointer
		 *     hasn't been assigned NULL yet (or we loaded it
		 *     before it was assigned) -- in this case it may
		 *     now be NULL which means we should just let the
		 *     packet pass through because splicing the frames
		 *     back is already done.
		 */
		tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);

		if (!tid_tx) {
			/* do nothing, let packet pass through */
		} else if (test_bit(HT_AGG_STATE_OPERATIONAL,
				    &tid_tx->state)) {
			reset_agg_timer = true;
		} else {
			queued = true;
			if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
				clear_sta_flag(tx->sta, WLAN_STA_SP);
				ps_dbg(tx->sta->sdata,
				       "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n",
				       tx->sta->sta.addr, tx->sta->sta.aid);
			}
			info->control.vif = &tx->sdata->vif;
			info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
			info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
			__skb_queue_tail(&tid_tx->pending, skb);
			if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
				purge_skb = __skb_dequeue(&tid_tx->pending);
		}
		spin_unlock(&tx->sta->lock);

		if (purge_skb)
			ieee80211_free_txskb(&tx->local->hw, purge_skb);
	}

	/* reset session timer */
	if (reset_agg_timer)
		tid_tx->last_tx = jiffies;

	return queued;
}
void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata,
			  struct sta_info *sta, struct sk_buff *skb)
{
	struct rate_control_ref *ref = sdata->local->rate_ctrl;
	u16 tid;

	if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER))
		return;

	if (!sta || (!sta->sta.valid_links &&
		     !sta->sta.deflink.ht_cap.ht_supported &&
		     !sta->sta.deflink.s1g_cap.s1g) ||
	    !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO ||
	    skb->protocol == sdata->control_port_protocol)
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (likely(sta->ampdu_mlme.tid_tx[tid]))
		return;

	ieee80211_start_tx_ba_session(&sta->sta, tid, 0);
}

/*
 * initialises @tx
 * pass %NULL for the station if unknown, a valid pointer if known
 * or an ERR_PTR() if the station is known not to exist
 */
static ieee80211_tx_result
ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
		     struct ieee80211_tx_data *tx,
		     struct sta_info *sta, struct sk_buff *skb)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool aggr_check = false;
	int tid;

	memset(tx, 0, sizeof(*tx));
	tx->skb = skb;
	tx->local = local;
	tx->sdata = sdata;
	__skb_queue_head_init(&tx->skbs);

	/*
	 * If this flag is set to true anywhere, and we get here,
	 * we are doing the needed processing, so remove the flag
	 * now.
	 */
	info->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;

	hdr = (struct ieee80211_hdr *) skb->data;

	if (likely(sta)) {
		if (!IS_ERR(sta))
			tx->sta = sta;
	} else {
		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
			tx->sta = rcu_dereference(sdata->u.vlan.sta);
			if (!tx->sta && sdata->wdev.use_4addr)
				return TX_DROP;
		} else if (tx->sdata->control_port_protocol == tx->skb->protocol) {
			tx->sta = sta_info_get_bss(sdata, hdr->addr1);
		}
		if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) {
			tx->sta = sta_info_get(sdata, hdr->addr1);
			aggr_check = true;
		}
	}

	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
	    ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
	    !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
		struct tid_ampdu_tx *tid_tx;

		tid = ieee80211_get_tid(hdr);
		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
		if (!tid_tx && aggr_check) {
			ieee80211_aggr_check(sdata, tx->sta, skb);
			tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
		}

		if (tid_tx) {
			bool queued;

			queued = ieee80211_tx_prep_agg(tx, skb, info,
						       tid_tx, tid);

			if (unlikely(queued))
				return TX_QUEUED;
		}
	}

	if (is_multicast_ether_addr(hdr->addr1)) {
		tx->flags &= ~IEEE80211_TX_UNICAST;
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	} else
		tx->flags |= IEEE80211_TX_UNICAST;

	if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
		if (!(tx->flags & IEEE80211_TX_UNICAST) ||
		    skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
		    info->flags & IEEE80211_TX_CTL_AMPDU)
			info->flags |= IEEE80211_TX_CTL_DONTFRAG;
	}

	if (!tx->sta)
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
	else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
		ieee80211_check_fast_xmit(tx->sta);
	}

	info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;

	return TX_CONTINUE;
}
static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
					  struct ieee80211_vif *vif,
					  struct sta_info *sta,
					  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_txq *txq = NULL;

	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
		return NULL;

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
		if ((!ieee80211_is_mgmt(hdr->frame_control) ||
		     ieee80211_is_bufferable_mmpdu(skb) ||
		     vif->type == NL80211_IFTYPE_STATION) &&
		    sta && sta->uploaded) {
			/*
			 * This will be NULL if the driver didn't set the
			 * opt-in hardware flag.
			 */
			txq = sta->sta.txq[IEEE80211_NUM_TIDS];
		}
	} else if (sta) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (!sta->uploaded)
			return NULL;

		txq = sta->sta.txq[tid];
	} else {
		txq = vif->txq;
	}

	if (!txq)
		return NULL;

	return to_txq_info(txq);
}
static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
{
	struct sk_buff *next;
	codel_time_t now = codel_get_time();

	skb_list_walk_safe(skb, skb, next)
		IEEE80211_SKB_CB(skb)->control.enqueue_time = now;
}

static u32 codel_skb_len_func(const struct sk_buff *skb)
{
	return skb->len;
}

static codel_time_t codel_skb_time_func(const struct sk_buff *skb)
{
	const struct ieee80211_tx_info *info;

	info = (const struct ieee80211_tx_info *)skb->cb;
	return info->control.enqueue_time;
}

static struct sk_buff *codel_dequeue_func(struct codel_vars *cvars,
					  void *ctx)
{
	struct ieee80211_local *local;
	struct txq_info *txqi;
	struct fq *fq;
	struct fq_flow *flow;

	txqi = ctx;
	local = vif_to_sdata(txqi->txq.vif)->local;
	fq = &local->fq;

	if (cvars == &txqi->def_cvars)
		flow = &txqi->tin.default_flow;
	else
		flow = &fq->flows[cvars - local->cvars];

	return fq_flow_dequeue(fq, flow);
}

static void codel_drop_func(struct sk_buff *skb,
			    void *ctx)
{
	struct ieee80211_local *local;
	struct ieee80211_hw *hw;
	struct txq_info *txqi;

	txqi = ctx;
	local = vif_to_sdata(txqi->txq.vif)->local;
	hw = &local->hw;

	ieee80211_free_txskb(hw, skb);
}

static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
					   struct fq_tin *tin,
					   struct fq_flow *flow)
{
	struct ieee80211_local *local;
	struct txq_info *txqi;
	struct codel_vars *cvars;
	struct codel_params *cparams;
	struct codel_stats *cstats;

	local = container_of(fq, struct ieee80211_local, fq);
	txqi = container_of(tin, struct txq_info, tin);
	cparams = &local->cparams;
	cstats = &txqi->cstats;

	if (flow == &tin->default_flow)
		cvars = &txqi->def_cvars;
	else
		cvars = &local->cvars[flow - fq->flows];

	return codel_dequeue(txqi,
			     &flow->backlog,
			     cparams,
			     cvars,
			     cstats,
			     codel_skb_len_func,
			     codel_skb_time_func,
			     codel_drop_func,
			     codel_dequeue_func);
}

static void fq_skb_free_func(struct fq *fq,
			     struct fq_tin *tin,
			     struct fq_flow *flow,
			     struct sk_buff *skb)
{
	struct ieee80211_local *local;

	local = container_of(fq, struct ieee80211_local, fq);
	ieee80211_free_txskb(&local->hw, skb);
}

static void ieee80211_txq_enqueue(struct ieee80211_local *local,
				  struct txq_info *txqi,
				  struct sk_buff *skb)
{
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;
	u32 flow_idx;

	ieee80211_set_skb_enqueue_time(skb);

	spin_lock_bh(&fq->lock);
	/*
	 * For management frames, don't really apply codel etc.,
	 * we don't want to apply any shaping or anything we just
	 * want to simplify the driver API by having them on the
	 * txqi.
	 */
	if (unlikely(txqi->txq.tid == IEEE80211_NUM_TIDS)) {
		IEEE80211_SKB_CB(skb)->control.flags |=
			IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
		__skb_queue_tail(&txqi->frags, skb);
	} else {
		flow_idx = fq_flow_idx(fq, skb);
		fq_tin_enqueue(fq, tin, flow_idx, skb,
			       fq_skb_free_func);
	}
	spin_unlock_bh(&fq->lock);
}
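/*
 * Illustrative sketch (not part of mac80211): CoDel needs every packet
 * stamped with its enqueue time so the dequeue path can measure sojourn
 * time and start dropping when the queue stays persistently above the
 * target (cf. cparams.target = MS2TIME(20) below). A userspace analog of
 * the stamp/compare step, assuming CLOCK_MONOTONIC is available:
 */
#if 0 /* example only, not kernel code */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000u + ts.tv_nsec / 1000u;
}

int main(void)
{
	uint64_t enqueue_time = now_us();	/* stamped on enqueue */
	uint64_t target_us = 20000;		/* 20 ms target delay */
	uint64_t sojourn = now_us() - enqueue_time;

	printf("sojourn %llu us, %s\n", (unsigned long long)sojourn,
	       sojourn > target_us ? "above target" : "below target");
	return 0;
}
#endif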
static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin,
				struct fq_flow *flow, struct sk_buff *skb,
				void *data)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	return info->control.vif == data;
}

void ieee80211_txq_remove_vlan(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata)
{
	struct fq *fq = &local->fq;
	struct txq_info *txqi;
	struct fq_tin *tin;
	struct ieee80211_sub_if_data *ap;

	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
		return;

	ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);

	if (!ap->vif.txq)
		return;

	txqi = to_txq_info(ap->vif.txq);
	tin = &txqi->tin;

	spin_lock_bh(&fq->lock);
	fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif,
		      fq_skb_free_func);
	spin_unlock_bh(&fq->lock);
}

void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
			struct sta_info *sta,
			struct txq_info *txqi, int tid)
{
	fq_tin_init(&txqi->tin);
	codel_vars_init(&txqi->def_cvars);
	codel_stats_init(&txqi->cstats);
	__skb_queue_head_init(&txqi->frags);
	INIT_LIST_HEAD(&txqi->schedule_order);

	txqi->txq.vif = &sdata->vif;

	if (!sta) {
		sdata->vif.txq = &txqi->txq;
		txqi->txq.tid = 0;
		txqi->txq.ac = IEEE80211_AC_BE;

		return;
	}

	if (tid == IEEE80211_NUM_TIDS) {
		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
			/* Drivers need to opt in to the management MPDU TXQ */
			if (!ieee80211_hw_check(&sdata->local->hw,
						STA_MMPDU_TXQ))
				return;
		} else if (!ieee80211_hw_check(&sdata->local->hw,
					       BUFF_MMPDU_TXQ)) {
			/* Drivers need to opt in to the bufferable MMPDU TXQ */
			return;
		}
		txqi->txq.ac = IEEE80211_AC_VO;
	} else {
		txqi->txq.ac = ieee80211_ac_from_tid(tid);
	}

	txqi->txq.sta = &sta->sta;
	txqi->txq.tid = tid;
	sta->sta.txq[tid] = &txqi->txq;
}

void ieee80211_txq_purge(struct ieee80211_local *local,
			 struct txq_info *txqi)
{
	struct fq *fq = &local->fq;
	struct fq_tin *tin = &txqi->tin;

	spin_lock_bh(&fq->lock);
	fq_tin_reset(fq, tin, fq_skb_free_func);
	ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
	spin_unlock_bh(&fq->lock);

	spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]);
	list_del_init(&txqi->schedule_order);
	spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]);
}

void ieee80211_txq_set_params(struct ieee80211_local *local, int radio_idx)
{
	if (local->hw.wiphy->txq_limit)
		local->fq.limit = local->hw.wiphy->txq_limit;
	else
		local->hw.wiphy->txq_limit = local->fq.limit;

	if (local->hw.wiphy->txq_memory_limit)
		local->fq.memory_limit = local->hw.wiphy->txq_memory_limit;
	else
		local->hw.wiphy->txq_memory_limit = local->fq.memory_limit;

	if (local->hw.wiphy->txq_quantum)
		local->fq.quantum = local->hw.wiphy->txq_quantum;
	else
		local->hw.wiphy->txq_quantum = local->fq.quantum;
}

int ieee80211_txq_setup_flows(struct ieee80211_local *local)
{
	struct fq *fq = &local->fq;
	int ret;
	int i;
	bool supp_vht = false;
	enum nl80211_band band;

	ret = fq_init(fq, 4096);
	if (ret)
		return ret;

	/*
	 * If the hardware doesn't support VHT, it is safe to limit the maximum
	 * queue size. 4 Mbytes is 64 max-size aggregates in 802.11n.
	 */
	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[band];
		if (!sband)
			continue;

		supp_vht = supp_vht || sband->vht_cap.vht_supported;
	}

	if (!supp_vht)
		fq->memory_limit = 4 << 20; /* 4 Mbytes */

	codel_params_init(&local->cparams);
	local->cparams.interval = MS2TIME(100);
	local->cparams.target = MS2TIME(20);
	local->cparams.ecn = true;

	local->cvars = kvcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
				GFP_KERNEL);
	if (!local->cvars) {
		spin_lock_bh(&fq->lock);
		fq_reset(fq, fq_skb_free_func);
		spin_unlock_bh(&fq->lock);
		return -ENOMEM;
	}

	for (i = 0; i < fq->flows_cnt; i++)
		codel_vars_init(&local->cvars[i]);

	ieee80211_txq_set_params(local, -1);

	return 0;
}
void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
{
	struct fq *fq = &local->fq;

	kvfree(local->cvars);
	local->cvars = NULL;

	spin_lock_bh(&fq->lock);
	fq_reset(fq, fq_skb_free_func);
	spin_unlock_bh(&fq->lock);
}

static bool ieee80211_queue_skb(struct ieee80211_local *local,
				struct ieee80211_sub_if_data *sdata,
				struct sta_info *sta,
				struct sk_buff *skb)
{
	struct ieee80211_vif *vif;
	struct txq_info *txqi;

	if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
		return false;

	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);

	vif = &sdata->vif;
	txqi = ieee80211_get_txq(local, vif, sta, skb);

	if (!txqi)
		return false;

	ieee80211_txq_enqueue(local, txqi, skb);

	schedule_and_wake_txq(local, txqi);

	return true;
}

static bool ieee80211_tx_frags(struct ieee80211_local *local,
			       struct ieee80211_vif *vif,
			       struct sta_info *sta,
			       struct sk_buff_head *skbs,
			       bool txpending)
{
	struct ieee80211_tx_control control = {};
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	skb_queue_walk_safe(skbs, skb, tmp) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		int q = info->hw_queue;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
		if (WARN_ON_ONCE(q >= local->hw.queues)) {
			__skb_unlink(skb, skbs);
			ieee80211_free_txskb(&local->hw, skb);
			continue;
		}
#endif

		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		if (local->queue_stop_reasons[q] ||
		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
			if (unlikely(info->flags &
				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
				if (local->queue_stop_reasons[q] &
				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
					/*
					 * Drop off-channel frames if queues
					 * are stopped for any reason other
					 * than off-channel operation. Never
					 * queue them.
					 */
					spin_unlock_irqrestore(
						&local->queue_stop_reason_lock,
						flags);
					ieee80211_purge_tx_queue(&local->hw,
								 skbs);
					return true;
				}
			} else {
				/*
				 * Since queue is stopped, queue up frames for
				 * later transmission from the tx-pending
				 * tasklet when the queue is woken again.
				 */
				if (txpending)
					skb_queue_splice_init(skbs,
							      &local->pending[q]);
				else
					skb_queue_splice_tail_init(skbs,
								   &local->pending[q]);

				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
						       flags);
				return false;
			}
		}
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);

		info->control.vif = vif;
		control.sta = sta ? &sta->sta : NULL;

		__skb_unlink(skb, skbs);
		drv_tx(local, &control, skb);
	}

	return true;
}

/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool __ieee80211_tx(struct ieee80211_local *local,
			   struct sk_buff_head *skbs, struct sta_info *sta,
			   bool txpending)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_vif *vif;
	struct sk_buff *skb;
	bool result;

	if (WARN_ON(skb_queue_empty(skbs)))
		return true;

	skb = skb_peek(skbs);
	info = IEEE80211_SKB_CB(skb);
	sdata = vif_to_sdata(info->control.vif);
	if (sta && !sta->uploaded)
		sta = NULL;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_MONITOR:
		if ((sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) ||
		    ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
			vif = &sdata->vif;
			break;
		}
		sdata = rcu_dereference(local->monitor_sdata);
		if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
			vif = &sdata->vif;
			info->hw_queue =
				vif->hw_queue[skb_get_queue_mapping(skb)];
		} else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
			ieee80211_purge_tx_queue(&local->hw, skbs);
			return true;
		} else
			vif = NULL;
		break;
	case NL80211_IFTYPE_AP_VLAN:
		sdata = container_of(sdata->bss,
				     struct ieee80211_sub_if_data, u.ap);
		fallthrough;
	default:
		vif = &sdata->vif;
		break;
	}

	result = ieee80211_tx_frags(local, vif, sta, skbs, txpending);

	WARN_ON_ONCE(!skb_queue_empty(skbs));

	return result;
}

/*
 * Invoke TX handlers, return 0 on success and non-zero if the
 * frame was dropped or queued.
 *
 * The handlers are split into an early and late part. The latter is everything
 * that can be sensitive to reordering, and will be deferred to after packets
 * are dequeued from the intermediate queues (when they are enabled).
 */
static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
{
	ieee80211_tx_result res = TX_DROP;

#define CALL_TXH(txh) \
	do {				\
		res = txh(tx);		\
		if (res != TX_CONTINUE)	\
			goto txh_done;	\
	} while (0)

	CALL_TXH(ieee80211_tx_h_dynamic_ps);
	CALL_TXH(ieee80211_tx_h_check_assoc);
	CALL_TXH(ieee80211_tx_h_ps_buf);
	CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
	CALL_TXH(ieee80211_tx_h_select_key);

 txh_done:
	if (unlikely(res == TX_DROP)) {
		tx->sdata->tx_handlers_drop++;
		if (tx->skb)
			ieee80211_free_txskb(&tx->local->hw, tx->skb);
		else
			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}

/*
 * Late handlers can be called while the sta lock is held. Handlers that can
 * cause packets to be generated will cause deadlock!
 */
static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
	ieee80211_tx_result res = TX_CONTINUE;

	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_rate_ctrl);

	if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
		__skb_queue_tail(&tx->skbs, tx->skb);
		tx->skb = NULL;
		goto txh_done;
	}

	CALL_TXH(ieee80211_tx_h_michael_mic_add);
	CALL_TXH(ieee80211_tx_h_sequence);
	CALL_TXH(ieee80211_tx_h_fragment);
	/* handlers after fragment must be aware of tx info fragmentation! */
	CALL_TXH(ieee80211_tx_h_stats);
	CALL_TXH(ieee80211_tx_h_encrypt);
	if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
		CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH

 txh_done:
	if (unlikely(res == TX_DROP)) {
		tx->sdata->tx_handlers_drop++;
		if (tx->skb)
			ieee80211_free_txskb(&tx->local->hw, tx->skb);
		else
			ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
		return -1;
	} else if (unlikely(res == TX_QUEUED)) {
		I802_DEBUG_INC(tx->local->tx_handlers_queued);
		return -1;
	}

	return 0;
}
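/*
 * Illustrative sketch (not part of mac80211): the CALL_TXH() macro above
 * chains handlers that each return CONTINUE, DROP or QUEUED, stopping the
 * pipeline at the first non-CONTINUE result. A toy userspace version of
 * that control flow, with hypothetical handlers:
 */
#if 0 /* example only, not kernel code */
#include <stdio.h>

enum toy_result { TOY_CONTINUE, TOY_DROP, TOY_QUEUED };

static enum toy_result handler_a(void) { return TOY_CONTINUE; }
static enum toy_result handler_b(void) { return TOY_DROP; }
static enum toy_result handler_c(void) { return TOY_CONTINUE; }

int main(void)
{
	enum toy_result res = TOY_CONTINUE;

#define CALL_TXH(txh)				\
	do {					\
		res = txh();			\
		if (res != TOY_CONTINUE)	\
			goto txh_done;		\
	} while (0)

	CALL_TXH(handler_a);
	CALL_TXH(handler_b);	/* returns TOY_DROP, stops the chain */
	CALL_TXH(handler_c);	/* never reached */
#undef CALL_TXH

txh_done:
	printf("pipeline result: %d\n", res);	/* 1 == TOY_DROP */
	return 0;
}
#endif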
static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
	int r = invoke_tx_handlers_early(tx);

	if (r)
		return r;
	return invoke_tx_handlers_late(tx);
}

bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, struct sk_buff *skb,
			      int band, struct ieee80211_sta **sta)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_data tx;
	struct sk_buff *skb2;

	if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP)
		return false;

	info->band = band;
	info->control.vif = vif;
	info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)];

	if (invoke_tx_handlers(&tx))
		return false;

	if (sta) {
		if (tx.sta)
			*sta = &tx.sta->sta;
		else
			*sta = NULL;
	}

	/* this function isn't suitable for fragmented data frames */
	skb2 = __skb_dequeue(&tx.skbs);
	if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) {
		ieee80211_free_txskb(hw, skb2);
		ieee80211_purge_tx_queue(hw, &tx.skbs);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(ieee80211_tx_prepare_skb);

/*
 * Returns false if the frame couldn't be transmitted but was queued instead.
 */
static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
			 struct sta_info *sta, struct sk_buff *skb,
			 bool txpending)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_data tx;
	ieee80211_tx_result res_prepare;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool result = true;

	if (unlikely(skb->len < 10)) {
		dev_kfree_skb(skb);
		return true;
	}

	/* initialises tx */
	res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb);

	if (unlikely(res_prepare == TX_DROP)) {
		ieee80211_free_txskb(&local->hw, skb);
		tx.sdata->tx_handlers_drop++;
		return true;
	} else if (unlikely(res_prepare == TX_QUEUED)) {
		return true;
	}

	/* set up hw_queue value early */
	if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
	    !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
		info->hw_queue =
			sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

	if (invoke_tx_handlers_early(&tx))
		return true;

	if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
		return true;

	if (!invoke_tx_handlers_late(&tx))
		result = __ieee80211_tx(local, &tx.skbs, tx.sta, txpending);

	return result;
}

/* device xmit handlers */

enum ieee80211_encrypt {
	ENCRYPT_NO,
	ENCRYPT_MGMT,
	ENCRYPT_DATA,
};

static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
				struct sk_buff *skb,
				int head_need,
				enum ieee80211_encrypt encrypt)
{
	struct ieee80211_local *local = sdata->local;
	bool enc_tailroom;
	int tail_need = 0;

	enc_tailroom = encrypt == ENCRYPT_MGMT ||
		       (encrypt == ENCRYPT_DATA &&
			sdata->crypto_tx_tailroom_needed_cnt);

	if (enc_tailroom) {
		tail_need = IEEE80211_ENCRYPT_TAILROOM;
		tail_need -= skb_tailroom(skb);
		tail_need = max_t(int, tail_need, 0);
	}

	if (skb_cloned(skb) &&
	    (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
	     !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
		I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
	else if (head_need || tail_need)
		I802_DEBUG_INC(local->tx_expand_skb_head);
	else
		return 0;

	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
		wiphy_debug(local->hw.wiphy,
			    "failed to reallocate TX buffer\n");
		return -ENOMEM;
	}

	return 0;
}
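/*
 * Illustrative sketch (not part of mac80211): ieee80211_xmit() below
 * computes how much extra headroom the skb needs by adding the driver's
 * tx_headroom and (when encrypting) the encryption headroom, subtracting
 * what the skb already has, and clamping at zero. In plain arithmetic,
 * with hypothetical sizes:
 */
#if 0 /* example only, not kernel code */
#include <stdio.h>

int main(void)
{
	int tx_headroom = 16, encrypt_headroom = 8;	/* hypothetical */
	int skb_headroom = 10;
	int need = tx_headroom + encrypt_headroom - skb_headroom;

	if (need < 0)
		need = 0;
	printf("must grow head by %d bytes\n", need);	/* prints 14 */
	return 0;
}
#endif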
(pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) { wiphy_debug(local->hw.wiphy, "failed to reallocate TX buffer\n"); return -ENOMEM; } return 0; } void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; int headroom; enum ieee80211_encrypt encrypt; if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT) encrypt = ENCRYPT_NO; else if (ieee80211_is_mgmt(hdr->frame_control)) encrypt = ENCRYPT_MGMT; else encrypt = ENCRYPT_DATA; headroom = local->tx_headroom; if (encrypt != ENCRYPT_NO) headroom += IEEE80211_ENCRYPT_HEADROOM; headroom -= skb_headroom(skb); headroom = max_t(int, 0, headroom); if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) { ieee80211_free_txskb(&local->hw, skb); return; } /* reload after potential resize */ hdr = (struct ieee80211_hdr *) skb->data; info->control.vif = &sdata->vif; if (ieee80211_vif_is_mesh(&sdata->vif)) { if (ieee80211_is_data(hdr->frame_control) && is_unicast_ether_addr(hdr->addr1)) { if (mesh_nexthop_resolve(sdata, skb)) return; /* skb queued: don't free */ } else { ieee80211_mps_set_frame_flags(sdata, NULL, hdr); } } ieee80211_set_qos_hdr(sdata, skb); ieee80211_tx(sdata, sta, skb, false); } static bool ieee80211_validate_radiotap_len(struct sk_buff *skb) { struct ieee80211_radiotap_header *rthdr = (struct ieee80211_radiotap_header *)skb->data; /* check for not even having the fixed radiotap header part */ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) return false; /* too short to be possibly valid */ /* is it a header version we can trust to find length from? */ if (unlikely(rthdr->it_version)) return false; /* only version 0 is supported */ /* does the skb contain enough to deliver on the alleged length? */ if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data))) return false; /* skb too short for claimed rt header extent */ return true; } bool ieee80211_parse_tx_radiotap(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_radiotap_iterator iterator; struct ieee80211_radiotap_header *rthdr = (struct ieee80211_radiotap_header *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, NULL); u16 txflags; u16 rate = 0; bool rate_found = false; u8 rate_retries = 0; u16 rate_flags = 0; u8 mcs_known, mcs_flags, mcs_bw; u16 vht_known; u8 vht_mcs = 0, vht_nss = 0; int i; if (!ieee80211_validate_radiotap_len(skb)) return false; info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_CTL_DONTFRAG; /* * for every radiotap entry that is present * (ieee80211_radiotap_iterator_next returns -ENOENT when no more * entries present, or -EINVAL on error) */ while (!ret) { ret = ieee80211_radiotap_iterator_next(&iterator); if (ret) continue; /* see if this argument is something we can use */ switch (iterator.this_arg_index) { /* * You must take care when dereferencing iterator.this_arg * for multibyte types... the pointer is not aligned. Use * get_unaligned((type *)iterator.this_arg) to dereference * iterator.this_arg for type "type" safely on all arches. */ case IEEE80211_RADIOTAP_FLAGS: if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { /* * this indicates that the skb we have been * handed has the 32-bit FCS CRC at the end... 
* we should react to that by snipping it off * because it will be recomputed and added * on transmission */ if (skb->len < (iterator._max_length + FCS_LEN)) return false; skb_trim(skb, skb->len - FCS_LEN); } if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT; if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) info->flags &= ~IEEE80211_TX_CTL_DONTFRAG; break; case IEEE80211_RADIOTAP_TX_FLAGS: txflags = get_unaligned_le16(iterator.this_arg); if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK) info->flags |= IEEE80211_TX_CTL_NO_ACK; if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO) info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO; if (txflags & IEEE80211_RADIOTAP_F_TX_ORDER) info->control.flags |= IEEE80211_TX_CTRL_DONT_REORDER; break; case IEEE80211_RADIOTAP_RATE: rate = *iterator.this_arg; rate_flags = 0; rate_found = true; break; case IEEE80211_RADIOTAP_ANTENNA: /* this can appear multiple times, keep a bitmap */ info->control.antennas |= BIT(*iterator.this_arg); break; case IEEE80211_RADIOTAP_DATA_RETRIES: rate_retries = *iterator.this_arg; break; case IEEE80211_RADIOTAP_MCS: mcs_known = iterator.this_arg[0]; mcs_flags = iterator.this_arg[1]; if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS)) break; rate_found = true; rate = iterator.this_arg[2]; rate_flags = IEEE80211_TX_RC_MCS; if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI && mcs_flags & IEEE80211_RADIOTAP_MCS_SGI) rate_flags |= IEEE80211_TX_RC_SHORT_GI; mcs_bw = mcs_flags & IEEE80211_RADIOTAP_MCS_BW_MASK; if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW && mcs_bw == IEEE80211_RADIOTAP_MCS_BW_40) rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_FEC && mcs_flags & IEEE80211_RADIOTAP_MCS_FEC_LDPC) info->flags |= IEEE80211_TX_CTL_LDPC; if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_STBC) { u8 stbc = u8_get_bits(mcs_flags, IEEE80211_RADIOTAP_MCS_STBC_MASK); info->flags |= u32_encode_bits(stbc, IEEE80211_TX_CTL_STBC); } break; case IEEE80211_RADIOTAP_VHT: vht_known = get_unaligned_le16(iterator.this_arg); rate_found = true; rate_flags = IEEE80211_TX_RC_VHT_MCS; if ((vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) && (iterator.this_arg[2] & IEEE80211_RADIOTAP_VHT_FLAG_SGI)) rate_flags |= IEEE80211_TX_RC_SHORT_GI; if (vht_known & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { if (iterator.this_arg[3] == 1) rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; else if (iterator.this_arg[3] == 4) rate_flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; else if (iterator.this_arg[3] == 11) rate_flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; } vht_mcs = iterator.this_arg[4] >> 4; if (vht_mcs > 11) vht_mcs = 0; vht_nss = iterator.this_arg[4] & 0xF; if (!vht_nss || vht_nss > 8) vht_nss = 1; break; /* * Please update the file * Documentation/networking/mac80211-injection.rst * when parsing new fields here. 
*/ default: break; } } if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ return false; if (rate_found) { struct ieee80211_supported_band *sband = local->hw.wiphy->bands[info->band]; info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT; for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { info->control.rates[i].idx = -1; info->control.rates[i].flags = 0; info->control.rates[i].count = 0; } if (rate_flags & IEEE80211_TX_RC_MCS) { /* reset antennas if not enough */ if (IEEE80211_HT_MCS_CHAINS(rate) > hweight8(info->control.antennas)) info->control.antennas = 0; info->control.rates[0].idx = rate; } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) { /* reset antennas if not enough */ if (vht_nss > hweight8(info->control.antennas)) info->control.antennas = 0; ieee80211_rate_set_vht(info->control.rates, vht_mcs, vht_nss); } else if (sband) { for (i = 0; i < sband->n_bitrates; i++) { if (rate * 5 != sband->bitrates[i].bitrate) continue; info->control.rates[0].idx = i; break; } } if (info->control.rates[0].idx < 0) info->control.flags &= ~IEEE80211_TX_CTRL_RATE_INJECT; info->control.rates[0].flags = rate_flags; info->control.rates[0].count = min_t(u8, rate_retries + 1, local->hw.max_rate_tries); } return true; } netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *tmp_sdata, *sdata; struct cfg80211_chan_def *chandef; u16 len_rthdr; int hdrlen; sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (unlikely(!ieee80211_sdata_running(sdata))) goto fail; memset(info, 0, sizeof(*info)); info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_CTL_INJECTED; /* Sanity-check the length of the radiotap header */ if (!ieee80211_validate_radiotap_len(skb)) goto fail; /* we now know there is a radiotap header with a length we can use */ len_rthdr = ieee80211_get_radiotap_len(skb->data); /* * fix up the pointers accounting for the radiotap * header still being in there. We are being given * a precooked IEEE80211 header so no need for * normal processing */ skb_set_mac_header(skb, len_rthdr); /* * these are just fixed to the end of the rt area since we * don't have any better information and at this point, nobody cares */ skb_set_network_header(skb, len_rthdr); skb_set_transport_header(skb, len_rthdr); if (skb->len < len_rthdr + 2) goto fail; hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < len_rthdr + hdrlen) goto fail; /* * Initialize skb->protocol if the injected frame is a data frame * carrying a rfc1042 header */ if (ieee80211_is_data(hdr->frame_control) && skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) { u8 *payload = (u8 *)hdr + hdrlen; if (ether_addr_equal(payload, rfc1042_header)) skb->protocol = cpu_to_be16((payload[6] << 8) | payload[7]); } rcu_read_lock(); /* * We process outgoing injected frames that have a local address * we handle as though they are non-injected frames. * This code here isn't entirely correct, the local MAC address * isn't always enough to find the interface to use; for proper * VLAN support we have an nl80211-based mechanism. * * This is necessary, for example, for old hostapd versions that * don't use nl80211-based management TX/RX. 
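 *
 * For example (a hypothetical scenario, not additional behaviour): a
 * frame injected on the monitor interface whose addr2 equals a running
 * AP interface's address is matched by the lookup below and then
 * transmitted through that AP's sdata, picking up its channel context,
 * instead of going out via the monitor interface itself.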
 */
	list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(tmp_sdata))
			continue;
		if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			continue;
		if (ether_addr_equal(tmp_sdata->vif.addr, hdr->addr2)) {
			sdata = tmp_sdata;
			break;
		}
	}

	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
	if (!chanctx_conf) {
		tmp_sdata = rcu_dereference(local->monitor_sdata);
		if (tmp_sdata)
			chanctx_conf =
				rcu_dereference(tmp_sdata->vif.bss_conf.chanctx_conf);
	}

	if (chanctx_conf)
		chandef = &chanctx_conf->def;
	else if (local->emulate_chanctx)
		chandef = &local->hw.conf.chandef;
	else
		goto fail_rcu;

	/*
	 * If driver/HW supports IEEE80211_CHAN_CAN_MONITOR we still
	 * shouldn't transmit on disabled channels.
	 */
	if (!cfg80211_chandef_usable(local->hw.wiphy, chandef,
				     IEEE80211_CHAN_DISABLED))
		goto fail_rcu;

	/*
	 * Frame injection is not allowed if beaconing is not allowed or
	 * if we need radar detection. Beaconing is usually not allowed
	 * when the mode or operation (ad-hoc, AP, mesh) does not support
	 * DFS. Passive-scan channels are also used in world regulatory
	 * domains where the country is not known; such channels must be
	 * treated as no-TX unless they are explicitly allowed, in which
	 * case the current regulatory domain would not carry the
	 * passive-scan flag.
	 *
	 * Since AP mode uses monitor interfaces to inject/TX management
	 * frames, we can make AP mode the exception to this rule once it
	 * supports radar detection, as its implementation can deal with
	 * radar detection by itself. We can do that later by adding a
	 * monitor flag to interfaces used for AP support.
	 */
	if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef,
				     sdata->vif.type))
		goto fail_rcu;

	info->band = chandef->chan->band;

	/*
	 * Initialize skb->priority according to frame type and TID class,
	 * with respect to the sub interface that the frame will actually
	 * be transmitted on. If the DONT_REORDER flag is set, the original
	 * skb->priority is preserved to assure frames injected with this
	 * flag are not reordered relative to each other.
	 */
	ieee80211_select_queue_80211(sdata, skb, hdr);
	skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));

	/*
	 * Process the radiotap header. This will now take into account the
	 * selected chandef above to accurately set injection rates and
	 * retransmissions.
*/ if (!ieee80211_parse_tx_radiotap(skb, dev)) goto fail_rcu; /* remove the injection radiotap header */ skb_pull(skb, len_rthdr); ieee80211_xmit(sdata, NULL, skb); rcu_read_unlock(); return NETDEV_TX_OK; fail_rcu: rcu_read_unlock(); fail: dev_kfree_skb(skb); return NETDEV_TX_OK; /* meaning, we dealt with the skb */ } static inline bool ieee80211_is_tdls_setup(struct sk_buff *skb) { u16 ethertype = (skb->data[12] << 8) | skb->data[13]; return ethertype == ETH_P_TDLS && skb->len > 14 && skb->data[14] == WLAN_TDLS_SNAP_RFTYPE; } int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct sta_info **sta_out) { struct sta_info *sta; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); if (sta) { *sta_out = sta; return 0; } else if (sdata->wdev.use_4addr) { return -ENOLINK; } fallthrough; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_ADHOC: if (is_multicast_ether_addr(skb->data)) { *sta_out = ERR_PTR(-ENOENT); return 0; } sta = sta_info_get_bss(sdata, skb->data); break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: /* determined much later */ *sta_out = NULL; return 0; #endif case NL80211_IFTYPE_STATION: if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) { sta = sta_info_get(sdata, skb->data); if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { *sta_out = sta; return 0; } /* * TDLS link during setup - throw out frames to * peer. Allow TDLS-setup frames to unauthorized * peers for the special case of a link teardown * after a TDLS sta is removed due to being * unreachable. */ if (!ieee80211_is_tdls_setup(skb)) return -EINVAL; } } sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); if (!sta) return -ENOLINK; break; default: return -EINVAL; } *sta_out = sta ?: ERR_PTR(-ENOENT); return 0; } static u16 ieee80211_store_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, u32 *info_flags, u64 *cookie) { struct sk_buff *ack_skb; u16 info_id = 0; if (skb->sk) ack_skb = skb_clone_sk(skb); else ack_skb = skb_clone(skb, GFP_ATOMIC); if (ack_skb) { unsigned long flags; int id; spin_lock_irqsave(&local->ack_status_lock, flags); id = idr_alloc(&local->ack_status_frames, ack_skb, 1, 0x2000, GFP_ATOMIC); spin_unlock_irqrestore(&local->ack_status_lock, flags); if (id >= 0) { info_id = id; *info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; if (cookie) { *cookie = ieee80211_mgmt_tx_cookie(local); IEEE80211_SKB_CB(ack_skb)->ack.cookie = *cookie; } } else { kfree_skb(ack_skb); } } return info_id; } /** * ieee80211_build_hdr - build 802.11 header in the given frame * @sdata: virtual interface to build the header for * @skb: the skb to build the header in * @info_flags: skb flags to set * @sta: the station pointer * @ctrl_flags: info control flags to set * @cookie: cookie pointer to fill (if not %NULL) * * This function takes the skb with 802.3 header and reformats the header to * the appropriate IEEE 802.11 header based on which interface the packet is * being transmitted on. * * Note that this function also takes care of the TX status request and * potential unsharing of the SKB - this needs to be interleaved with the * header building. 
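 *
 * As a quick reference for the interface-type switch below (this is the
 * standard IEEE 802.11 data-frame address layout, not behaviour
 * specific to this function):
 *
 *	mode (ToDS/FromDS)	addr1	addr2	addr3	addr4
 *	STA (ToDS)		BSSID	SA	DA	-
 *	AP (FromDS)		DA	BSSID	SA	-
 *	4-address (both)	RA	TA	DA	SA
 *	IBSS/OCB (neither)	DA	SA	BSSID	-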
* * The function requires the read-side RCU lock held * * Returns: the (possibly reallocated) skb or an ERR_PTR() code */ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 info_flags, struct sta_info *sta, u32 ctrl_flags, u64 *cookie) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info; int head_need; u16 ethertype, hdrlen, meshhdrlen = 0; __le16 fc; struct ieee80211_hdr hdr; struct ieee80211s_hdr mesh_hdr __maybe_unused; struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL; const u8 *encaps_data; int encaps_len, skip_header_bytes; bool wme_sta = false, authorized = false; bool tdls_peer; bool multicast; u16 info_id = 0; struct ieee80211_chanctx_conf *chanctx_conf = NULL; enum nl80211_band band; int ret; u8 link_id = u32_get_bits(ctrl_flags, IEEE80211_TX_CTRL_MLO_LINK); if (IS_ERR(sta)) sta = NULL; #ifdef CONFIG_MAC80211_DEBUGFS if (local->force_tx_status) info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; #endif /* convert Ethernet header to proper 802.11 header (based on * operation mode) */ ethertype = (skb->data[12] << 8) | skb->data[13]; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); if (!ieee80211_vif_is_mld(&sdata->vif)) chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: if (sdata->wdev.use_4addr) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); wme_sta = sta->sta.wme; } if (!ieee80211_vif_is_mld(&sdata->vif)) { struct ieee80211_sub_if_data *ap_sdata; /* override chanctx_conf from AP (we don't have one) */ ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); chanctx_conf = rcu_dereference(ap_sdata->vif.bss_conf.chanctx_conf); } if (sdata->wdev.use_4addr) break; fallthrough; case NL80211_IFTYPE_AP: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ memcpy(hdr.addr1, skb->data, ETH_ALEN); if (ieee80211_vif_is_mld(&sdata->vif) && sta && !sta->sta.mlo) { struct ieee80211_link_data *link; link_id = sta->deflink.link_id; link = rcu_dereference(sdata->link[link_id]); if (WARN_ON(!link)) { ret = -ENOLINK; goto free; } memcpy(hdr.addr2, link->conf->addr, ETH_ALEN); } else if (link_id == IEEE80211_LINK_UNSPECIFIED || (sta && sta->sta.mlo)) { memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); } else { struct ieee80211_bss_conf *conf; conf = rcu_dereference(sdata->vif.link_conf[link_id]); if (unlikely(!conf)) { ret = -ENOLINK; goto free; } memcpy(hdr.addr2, conf->addr, ETH_ALEN); } memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 24; break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: if (!is_multicast_ether_addr(skb->data)) { struct sta_info *next_hop; bool mpp_lookup = true; mpath = mesh_path_lookup(sdata, skb->data); if (mpath) { mpp_lookup = false; next_hop = rcu_dereference(mpath->next_hop); if (!next_hop || !(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING))) mpp_lookup = true; } if (mpp_lookup) { mppath = mpp_path_lookup(sdata, skb->data); if (mppath) mppath->exp_time = jiffies; } if (mppath && mpath) mesh_path_del(sdata, mpath->dst); } /* * Use address extension if it is a packet from * another interface or if we know the destination * is being proxied by a portal (i.e. 
portal address * differs from proxied address) */ if (ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN) && !(mppath && !ether_addr_equal(mppath->mpp, skb->data))) { hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, skb->data, skb->data + ETH_ALEN); meshhdrlen = ieee80211_new_mesh_header(sdata, &mesh_hdr, NULL, NULL); } else { /* DS -> MBSS (802.11-2012 13.11.3.3). * For unicast with unknown forwarding information, * destination might be in the MBSS or if that fails * forwarded to another mesh gate. In either case * resolution will be handled in ieee80211_xmit(), so * leave the original DA. This also works for mcast */ const u8 *mesh_da = skb->data; if (mppath) mesh_da = mppath->mpp; else if (mpath) mesh_da = mpath->dst; hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, mesh_da, sdata->vif.addr); if (is_multicast_ether_addr(mesh_da)) /* DA TA mSA AE:SA */ meshhdrlen = ieee80211_new_mesh_header( sdata, &mesh_hdr, skb->data + ETH_ALEN, NULL); else /* RA TA mDA mSA AE:DA SA */ meshhdrlen = ieee80211_new_mesh_header( sdata, &mesh_hdr, skb->data, skb->data + ETH_ALEN); } /* For injected frames, fill RA right away as nexthop lookup * will be skipped. */ if ((ctrl_flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP) && is_zero_ether_addr(hdr.addr1)) memcpy(hdr.addr1, skb->data, ETH_ALEN); break; #endif case NL80211_IFTYPE_STATION: /* we already did checks when looking up the RA STA */ tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER); if (tdls_peer) { /* For TDLS only one link can be valid with peer STA */ int tdls_link_id = ieee80211_tdls_sta_link_id(sta); struct ieee80211_link_data *link; /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); link = rcu_dereference(sdata->link[tdls_link_id]); if (WARN_ON_ONCE(!link)) { ret = -EINVAL; goto free; } memcpy(hdr.addr3, link->u.mgd.bssid, ETH_ALEN); hdrlen = 24; } else if (sdata->u.mgd.use_4addr && cpu_to_be16(ethertype) != sdata->control_port_protocol) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr.addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN); memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; } else { fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ memcpy(hdr.addr1, sdata->vif.cfg.ap_addr, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); hdrlen = 24; } break; case NL80211_IFTYPE_OCB: /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); eth_broadcast_addr(hdr.addr3); hdrlen = 24; break; case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); memcpy(hdr.addr3, sdata->u.ibss.bssid, ETH_ALEN); hdrlen = 24; break; default: ret = -EINVAL; goto free; } if (!chanctx_conf) { if (!ieee80211_vif_is_mld(&sdata->vif)) { ret = -ENOTCONN; goto free; } /* MLD transmissions must not rely on the band */ band = 0; } else { band = chanctx_conf->def.chan->band; } multicast = is_multicast_ether_addr(hdr.addr1); /* sta is always NULL for mesh */ if (sta) { authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); wme_sta = sta->sta.wme; } else if (ieee80211_vif_is_mesh(&sdata->vif)) { /* For mesh, the use of the QoS header is mandatory */ wme_sta = true; } /* receiver does QoS (which also means we do) use it */ if (wme_sta) { fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); hdrlen += 2; } 
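	/*
	 * A worked example of the header budget at this point (a sketch,
	 * not behaviour added here): a unicast QoS data frame on a
	 * non-mesh STA has hdrlen = 24 + 2 = 26; 4-address mode gives
	 * 30 + 2 = 32; meshhdrlen adds a further 6, 12 or 18 bytes of
	 * mesh address extension. The LLC/SNAP encapsulation selected
	 * below adds another 8 bytes (6-byte rfc1042_header or
	 * bridge_tunnel_header plus the retained 2-byte ethertype), all
	 * of which feeds into the head_need computation.
	 */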
/* * Drop unicast frames to unauthorised stations unless they are * EAPOL frames from the local station. */ if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) && (sdata->vif.type != NL80211_IFTYPE_OCB) && !multicast && !authorized && (cpu_to_be16(ethertype) != sdata->control_port_protocol || !ieee80211_is_our_addr(sdata, skb->data + ETH_ALEN, NULL)))) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n", sdata->name, hdr.addr1); #endif I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); ret = -EPERM; goto free; } if (unlikely(!multicast && (sk_requests_wifi_status(skb->sk) || ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS))) info_id = ieee80211_store_ack_skb(local, skb, &info_flags, cookie); /* * If the skb is shared we need to obtain our own copy. */ skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) { ret = -ENOMEM; goto free; } hdr.frame_control = fc; hdr.duration_id = 0; hdr.seq_ctrl = 0; skip_header_bytes = ETH_HLEN; if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; } else if (ethertype >= ETH_P_802_3_MIN) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; } else { encaps_data = NULL; encaps_len = 0; } skb_pull(skb, skip_header_bytes); head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); /* * So we need to modify the skb header and hence need a copy of * that. The head_need variable above doesn't, so far, include * the needed header space that we don't need right away. If we * can, then we don't reallocate right now but only after the * frame arrives at the master device (if it does...) * * If we cannot, however, then we will reallocate to include all * the ever needed space. Also, if we need to reallocate it anyway, * make it big enough for everything we may ever need. */ if (head_need > 0 || skb_cloned(skb)) { head_need += IEEE80211_ENCRYPT_HEADROOM; head_need += local->tx_headroom; head_need = max_t(int, 0, head_need); if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) { ieee80211_free_txskb(&local->hw, skb); skb = NULL; return ERR_PTR(-ENOMEM); } } if (encaps_data) memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); #ifdef CONFIG_MAC80211_MESH if (meshhdrlen > 0) memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); #endif if (ieee80211_is_data_qos(fc)) { __le16 *qos_control; qos_control = skb_push(skb, 2); memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2); /* * Maybe we could actually set some fields here, for now just * initialise to zero to indicate no special operation. */ *qos_control = 0; } else memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); skb_reset_mac_header(skb); info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); info->flags = info_flags; if (info_id) { info->status_data = info_id; info->status_data_idr = 1; } info->band = band; if (likely(!cookie)) { ctrl_flags |= u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK); } else { unsigned int pre_conf_link_id; /* * ctrl_flags already have been set by * ieee80211_tx_control_port(), here * we just sanity check that */ pre_conf_link_id = u32_get_bits(ctrl_flags, IEEE80211_TX_CTRL_MLO_LINK); if (pre_conf_link_id != link_id && link_id != IEEE80211_LINK_UNSPECIFIED) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG net_info_ratelimited("%s: dropped frame to %pM with bad link ID request (%d vs. 
%d)\n", sdata->name, hdr.addr1, pre_conf_link_id, link_id); #endif ret = -EINVAL; goto free; } } info->control.flags = ctrl_flags; return skb; free: kfree_skb(skb); return ERR_PTR(ret); } /* * fast-xmit overview * * The core idea of this fast-xmit is to remove per-packet checks by checking * them out of band. ieee80211_check_fast_xmit() implements the out-of-band * checks that are needed to get the sta->fast_tx pointer assigned, after which * much less work can be done per packet. For example, fragmentation must be * disabled or the fast_tx pointer will not be set. All the conditions are seen * in the code here. * * Once assigned, the fast_tx data structure also caches the per-packet 802.11 * header and other data to aid packet processing in ieee80211_xmit_fast(). * * The most difficult part of this is that when any of these assumptions * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(), * ieee80211_check_fast_xmit() or friends) is required to reset the data, * since the per-packet code no longer checks the conditions. This is reflected * by the calls to these functions throughout the rest of the code, and must be * maintained if any of the TX path checks change. */ void ieee80211_check_fast_xmit(struct sta_info *sta) { struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old; struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_hdr *hdr = (void *)build.hdr; struct ieee80211_chanctx_conf *chanctx_conf; __le16 fc; if (!ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT)) return; if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_fast_tx_flush_sta(sdata, sta); /* Locking here protects both the pointer itself, and against concurrent * invocations winning data access races to, e.g., the key pointer that * is used. * Without it, the invocation of this function right after the key * pointer changes wouldn't be sufficient, as another CPU could access * the pointer, then stall, and then do the cache update after the CPU * that invalidated the key. * With the locking, such scenarios cannot happen as the check for the * key and the fast-tx assignment are done atomically, so the CPU that * modifies the key will either wait or other one will see the key * cleared/changed already. 
*/ spin_lock_bh(&sta->lock); if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && sdata->vif.type == NL80211_IFTYPE_STATION) goto out; if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded) goto out; if (test_sta_flag(sta, WLAN_STA_PS_STA) || test_sta_flag(sta, WLAN_STA_PS_DRIVER) || test_sta_flag(sta, WLAN_STA_PS_DELIVER) || test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT)) goto out; if (sdata->noack_map) goto out; /* fast-xmit doesn't handle fragmentation at all */ if (local->hw.wiphy->frag_threshold != (u32)-1 && !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG)) goto out; if (!ieee80211_vif_is_mld(&sdata->vif)) { rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); goto out; } build.band = chanctx_conf->def.chan->band; rcu_read_unlock(); } else { /* MLD transmissions must not rely on the band */ build.band = 0; } fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); switch (sdata->vif.type) { case NL80211_IFTYPE_ADHOC: /* DA SA BSSID */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN); build.hdr_len = 24; break; case NL80211_IFTYPE_STATION: if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { /* For TDLS only one link can be valid with peer STA */ int tdls_link_id = ieee80211_tdls_sta_link_id(sta); struct ieee80211_link_data *link; /* DA SA BSSID */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); rcu_read_lock(); link = rcu_dereference(sdata->link[tdls_link_id]); if (!WARN_ON_ONCE(!link)) memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN); rcu_read_unlock(); build.hdr_len = 24; break; } if (sdata->u.mgd.use_4addr) { /* non-regular ethertype cannot use the fastpath */ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr4); build.hdr_len = 30; break; } fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ memcpy(hdr->addr1, sdata->vif.cfg.ap_addr, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); build.hdr_len = 24; break; case NL80211_IFTYPE_AP_VLAN: if (sdata->wdev.use_4addr) { fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); build.da_offs = offsetof(struct ieee80211_hdr, addr3); build.sa_offs = offsetof(struct ieee80211_hdr, addr4); build.hdr_len = 30; break; } fallthrough; case NL80211_IFTYPE_AP: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); if (sta->sta.mlo || !ieee80211_vif_is_mld(&sdata->vif)) { memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); } else { unsigned int link_id = sta->deflink.link_id; struct ieee80211_link_data *link; rcu_read_lock(); link = rcu_dereference(sdata->link[link_id]); if (WARN_ON(!link)) { rcu_read_unlock(); goto out; } memcpy(hdr->addr2, link->conf->addr, ETH_ALEN); rcu_read_unlock(); } build.sa_offs = offsetof(struct ieee80211_hdr, addr3); build.hdr_len = 24; break; default: /* not handled on fast-xmit */ goto out; } if (sta->sta.wme) { build.hdr_len += 2; fc |= 
cpu_to_le16(IEEE80211_STYPE_QOS_DATA); } /* We store the key here so there's no point in using rcu_dereference() * but that's fine because the code that changes the pointers will call * this function after doing so. For a single CPU that would be enough, * for multiple see the comment above. */ build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]); if (!build.key) build.key = rcu_access_pointer(sdata->default_unicast_key); if (build.key) { bool gen_iv, iv_spc, mmic; gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV; iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE; mmic = build.key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE); /* don't handle software crypto */ if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) goto out; /* Key is being removed */ if (build.key->flags & KEY_FLAG_TAINTED) goto out; switch (build.key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: if (gen_iv) build.pn_offs = build.hdr_len; if (gen_iv || iv_spc) build.hdr_len += IEEE80211_CCMP_HDR_LEN; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (gen_iv) build.pn_offs = build.hdr_len; if (gen_iv || iv_spc) build.hdr_len += IEEE80211_GCMP_HDR_LEN; break; case WLAN_CIPHER_SUITE_TKIP: /* cannot handle MMIC or IV generation in xmit-fast */ if (mmic || gen_iv) goto out; if (iv_spc) build.hdr_len += IEEE80211_TKIP_IV_LEN; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* cannot handle IV generation in fast-xmit */ if (gen_iv) goto out; if (iv_spc) build.hdr_len += IEEE80211_WEP_IV_LEN; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: WARN(1, "management cipher suite 0x%x enabled for data\n", build.key->conf.cipher); goto out; default: /* we don't know how to generate IVs for this at all */ if (WARN_ON(gen_iv)) goto out; } fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); } hdr->frame_control = fc; memcpy(build.hdr + build.hdr_len, rfc1042_header, sizeof(rfc1042_header)); build.hdr_len += sizeof(rfc1042_header); fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC); /* if the kmemdup fails, continue w/o fast_tx */ out: /* we might have raced against another call to this function */ old = rcu_dereference_protected(sta->fast_tx, lockdep_is_held(&sta->lock)); rcu_assign_pointer(sta->fast_tx, fast_tx); if (old) kfree_rcu(old, rcu_head); spin_unlock_bh(&sta->lock); } void ieee80211_check_fast_xmit_all(struct ieee80211_local *local) { struct sta_info *sta; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) ieee80211_check_fast_xmit(sta); rcu_read_unlock(); } void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata && (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) continue; ieee80211_check_fast_xmit(sta); } rcu_read_unlock(); } void ieee80211_clear_fast_xmit(struct sta_info *sta) { struct ieee80211_fast_tx *fast_tx; spin_lock_bh(&sta->lock); fast_tx = rcu_dereference_protected(sta->fast_tx, lockdep_is_held(&sta->lock)); RCU_INIT_POINTER(sta->fast_tx, NULL); spin_unlock_bh(&sta->lock); if (fast_tx) kfree_rcu(fast_tx, rcu_head); } static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, struct sk_buff *skb, int headroom) { if (skb_headroom(skb) < headroom) { 
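		/*
		 * Slow path: reallocate the skb head to gain headroom for
		 * the A-MSDU subframe header; the tail argument passed to
		 * pskb_expand_head() below is 0, so tailroom is left
		 * unchanged.
		 */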
I802_DEBUG_INC(local->tx_expand_skb_head); if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { wiphy_debug(local->hw.wiphy, "failed to reallocate TX buffer\n"); return false; } } return true; } static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata, struct ieee80211_fast_tx *fast_tx, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ethhdr *amsdu_hdr; int hdr_len = fast_tx->hdr_len - sizeof(rfc1042_header); int subframe_len = skb->len - hdr_len; void *data; u8 *qc, *h_80211_src, *h_80211_dst; const u8 *bssid; if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) return false; if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) return true; if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr) + local->hw.extra_tx_headroom)) return false; data = skb_push(skb, sizeof(*amsdu_hdr)); memmove(data, data + sizeof(*amsdu_hdr), hdr_len); hdr = data; amsdu_hdr = data + hdr_len; /* h_80211_src/dst is addr* field within hdr */ h_80211_src = data + fast_tx->sa_offs; h_80211_dst = data + fast_tx->da_offs; amsdu_hdr->h_proto = cpu_to_be16(subframe_len); ether_addr_copy(amsdu_hdr->h_source, h_80211_src); ether_addr_copy(amsdu_hdr->h_dest, h_80211_dst); /* according to IEEE 802.11-2012 8.3.2 table 8-19, the outer SA/DA * fields needs to be changed to BSSID for A-MSDU frames depending * on FromDS/ToDS values. */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: bssid = sdata->vif.cfg.ap_addr; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: bssid = sdata->vif.addr; break; default: bssid = NULL; } if (bssid && ieee80211_has_fromds(hdr->frame_control)) ether_addr_copy(h_80211_src, bssid); if (bssid && ieee80211_has_tods(hdr->frame_control)) ether_addr_copy(h_80211_dst, bssid); qc = ieee80211_get_qos_ctl(hdr); *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; info->control.flags |= IEEE80211_TX_CTRL_AMSDU; return true; } static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_fast_tx *fast_tx, struct sk_buff *skb, const u8 *da, const u8 *sa) { struct ieee80211_local *local = sdata->local; struct fq *fq = &local->fq; struct fq_tin *tin; struct fq_flow *flow; u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; struct ieee80211_txq *txq = sta->sta.txq[tid]; struct txq_info *txqi; struct sk_buff **frag_tail, *head; int subframe_len = skb->len - ETH_ALEN; u8 max_subframes = sta->sta.max_amsdu_subframes; int max_frags = local->hw.max_tx_fragments; int max_amsdu_len = sta->sta.cur->max_amsdu_len; int orig_truesize; u32 flow_idx; __be16 len; void *data; bool ret = false; unsigned int orig_len; int n = 2, nfrags, pad = 0; u16 hdrlen; if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) return false; if (sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) return false; if (ieee80211_vif_is_mesh(&sdata->vif)) return false; if (skb_is_gso(skb)) return false; if (!txq) return false; txqi = to_txq_info(txq); if (test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags)) return false; if (sta->sta.cur->max_rc_amsdu_len) max_amsdu_len = min_t(int, max_amsdu_len, sta->sta.cur->max_rc_amsdu_len); if (sta->sta.cur->max_tid_amsdu_len[tid]) max_amsdu_len = min_t(int, max_amsdu_len, sta->sta.cur->max_tid_amsdu_len[tid]); flow_idx = fq_flow_idx(fq, skb); spin_lock_bh(&fq->lock); /* TODO: Ideally aggregation should be done on dequeue to remain * responsive to environment changes. 
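 * Aggregating at enqueue time fixes the A-MSDU size before the rate
 * and airtime conditions at actual transmit time are known, so a link
 * that degrades while packets sit on the queue can end up carrying
 * aggregates sized for the old conditions.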
*/ tin = &txqi->tin; flow = fq_flow_classify(fq, tin, flow_idx, skb); head = skb_peek_tail(&flow->queue); if (!head || skb_is_gso(head)) goto out; orig_truesize = head->truesize; orig_len = head->len; if (skb->len + head->len > max_amsdu_len) goto out; nfrags = 1 + skb_shinfo(skb)->nr_frags; nfrags += 1 + skb_shinfo(head)->nr_frags; frag_tail = &skb_shinfo(head)->frag_list; while (*frag_tail) { nfrags += 1 + skb_shinfo(*frag_tail)->nr_frags; frag_tail = &(*frag_tail)->next; n++; } if (max_subframes && n > max_subframes) goto out; if (max_frags && nfrags > max_frags) goto out; if (!drv_can_aggregate_in_amsdu(local, head, skb)) goto out; if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) goto out; /* If n == 2, the "while (*frag_tail)" loop above didn't execute * and frag_tail should be &skb_shinfo(head)->frag_list. * However, ieee80211_amsdu_prepare_head() can reallocate it. * Reload frag_tail to have it pointing to the correct place. */ if (n == 2) frag_tail = &skb_shinfo(head)->frag_list; /* * Pad out the previous subframe to a multiple of 4 by adding the * padding to the next one, that's being added. Note that head->len * is the length of the full A-MSDU, but that works since each time * we add a new subframe we pad out the previous one to a multiple * of 4 and thus it no longer matters in the next round. */ hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header); if ((head->len - hdrlen) & 3) pad = 4 - ((head->len - hdrlen) & 3); if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2 + pad)) goto out_recalc; ret = true; data = skb_push(skb, ETH_ALEN + 2); ether_addr_copy(data, da); ether_addr_copy(data + ETH_ALEN, sa); data += 2 * ETH_ALEN; len = cpu_to_be16(subframe_len); memcpy(data, &len, 2); memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); memset(skb_push(skb, pad), 0, pad); head->len += skb->len; head->data_len += skb->len; *frag_tail = skb; out_recalc: fq->memory_usage += head->truesize - orig_truesize; if (head->len != orig_len) { flow->backlog += head->len - orig_len; tin->backlog_bytes += head->len - orig_len; } out: spin_unlock_bh(&fq->lock); return ret; } /* * Can be called while the sta lock is held. Anything that can cause packets to * be generated will cause deadlock! 
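 * (The deadlock would be recursive: generating a packet re-enters the
 * TX path, and paths reachable from there, e.g. the fast-xmit rebuild
 * above, take sta->lock again.)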
*/ static ieee80211_tx_result ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, u8 pn_offs, struct ieee80211_key *key, struct ieee80211_tx_data *tx) { struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; u8 tid = IEEE80211_NUM_TIDS; if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL) && ieee80211_tx_h_rate_ctrl(tx) != TX_CONTINUE) return TX_DROP; if (key) info->control.hw_key = &key->conf; dev_sw_netstats_tx_add(skb->dev, 1, skb->len); if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); } else { info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number); sdata->sequence_number += 0x10; } if (skb_shinfo(skb)->gso_size) sta->deflink.tx_stats.msdu[tid] += DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); else sta->deflink.tx_stats.msdu[tid]++; info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; /* statistics normally done by ieee80211_tx_h_stats (but that * has to consider fragmentation, so is more complex) */ sta->deflink.tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; sta->deflink.tx_stats.packets[skb_get_queue_mapping(skb)]++; if (pn_offs) { u64 pn; u8 *crypto_hdr = skb->data + pn_offs; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: pn = atomic64_inc_return(&key->conf.tx_pn); crypto_hdr[0] = pn; crypto_hdr[1] = pn >> 8; crypto_hdr[3] = 0x20 | (key->conf.keyidx << 6); crypto_hdr[4] = pn >> 16; crypto_hdr[5] = pn >> 24; crypto_hdr[6] = pn >> 32; crypto_hdr[7] = pn >> 40; break; } } return TX_CONTINUE; } static netdev_features_t ieee80211_sdata_netdev_features(struct ieee80211_sub_if_data *sdata) { if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN) return sdata->vif.netdev_features; if (!sdata->bss) return 0; sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); return sdata->vif.netdev_features; } static struct sk_buff * ieee80211_tx_skb_fixup(struct sk_buff *skb, netdev_features_t features) { if (skb_is_gso(skb)) { struct sk_buff *segs; segs = skb_gso_segment(skb, features); if (!segs) return skb; if (IS_ERR(segs)) goto free; consume_skb(skb); return segs; } if (skb_needs_linearize(skb, features) && __skb_linearize(skb)) goto free; if (skb->ip_summed == CHECKSUM_PARTIAL) { int ofs = skb_checksum_start_offset(skb); if (skb->encapsulation) skb_set_inner_transport_header(skb, ofs); else skb_set_transport_header(skb, ofs); if (skb_csum_hwoffload_help(skb, features)) goto free; } skb_mark_not_on_list(skb); return skb; free: kfree_skb(skb); return NULL; } void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_fast_tx *fast_tx, struct sk_buff *skb, bool ampdu, const u8 *da, const u8 *sa) { struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr = (void *)fast_tx->hdr; struct ieee80211_tx_info *info; struct ieee80211_tx_data tx; ieee80211_tx_result r; int hw_headroom = sdata->local->hw.extra_tx_headroom; int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2); skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return; if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) && ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb, da, sa)) return; /* will not be crypto-handled beyond what we do here, so use false * as 
the may-encrypt argument for the resize to not account for * more room than we already have in 'extra_head' */ if (unlikely(ieee80211_skb_resize(sdata, skb, max_t(int, extra_head + hw_headroom - skb_headroom(skb), 0), ENCRYPT_NO))) goto free; hdr = skb_push(skb, extra_head); memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len); memcpy(skb->data + fast_tx->da_offs, da, ETH_ALEN); memcpy(skb->data + fast_tx->sa_offs, sa, ETH_ALEN); info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); info->band = fast_tx->band; info->control.vif = &sdata->vif; info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT | IEEE80211_TX_CTL_DONTFRAG; info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT | u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, IEEE80211_TX_CTRL_MLO_LINK); #ifdef CONFIG_MAC80211_DEBUGFS if (local->force_tx_status) info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; #endif if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; *ieee80211_get_qos_ctl(hdr) = tid; } __skb_queue_head_init(&tx.skbs); tx.flags = IEEE80211_TX_UNICAST; tx.local = local; tx.sdata = sdata; tx.sta = sta; tx.key = fast_tx->key; if (ieee80211_queue_skb(local, sdata, sta, skb)) return; tx.skb = skb; r = ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs, fast_tx->key, &tx); tx.skb = NULL; if (r == TX_DROP) { tx.sdata->tx_handlers_drop++; goto free; } if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); __skb_queue_tail(&tx.skbs, skb); ieee80211_tx_frags(local, &sdata->vif, sta, &tx.skbs, false); return; free: kfree_skb(skb); } static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee80211_fast_tx *fast_tx, struct sk_buff *skb) { u16 ethertype = (skb->data[12] << 8) | skb->data[13]; struct ieee80211_hdr *hdr = (void *)fast_tx->hdr; struct tid_ampdu_tx *tid_tx = NULL; struct sk_buff *next; struct ethhdr eth; u8 tid = IEEE80211_NUM_TIDS; /* control port protocol needs a lot of special handling */ if (cpu_to_be16(ethertype) == sdata->control_port_protocol) return false; /* only RFC 1042 SNAP */ if (ethertype < ETH_P_802_3_MIN) return false; /* don't handle TX status request here either */ if (sk_requests_wifi_status(skb->sk)) return false; if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); if (tid_tx) { if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) return false; if (tid_tx->timeout) tid_tx->last_tx = jiffies; } } memcpy(&eth, skb->data, ETH_HLEN - 2); /* after this point (skb is modified) we cannot return false */ skb = ieee80211_tx_skb_fixup(skb, ieee80211_sdata_netdev_features(sdata)); if (!skb) return true; skb_list_walk_safe(skb, skb, next) { skb_mark_not_on_list(skb); __ieee80211_xmit_fast(sdata, sta, fast_tx, skb, tid_tx, eth.h_dest, eth.h_source); } return true; } struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct ieee80211_local *local = hw_to_local(hw); struct txq_info *txqi = container_of(txq, struct txq_info, txq); struct ieee80211_hdr *hdr; struct sk_buff *skb = NULL; struct fq *fq = &local->fq; struct fq_tin *tin = &txqi->tin; struct ieee80211_tx_info *info; struct ieee80211_tx_data tx; ieee80211_tx_result r; struct ieee80211_vif *vif = txq->vif; int q = vif->hw_queue[txq->ac]; unsigned long flags; bool q_stopped; WARN_ON_ONCE(softirq_count() == 0); if 
(!ieee80211_txq_airtime_check(hw, txq)) return NULL; begin: spin_lock_irqsave(&local->queue_stop_reason_lock, flags); q_stopped = local->queue_stop_reasons[q]; spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); if (unlikely(q_stopped)) { /* mark for waking later */ set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags); return NULL; } spin_lock_bh(&fq->lock); /* Make sure fragments stay together. */ skb = __skb_dequeue(&txqi->frags); if (unlikely(skb)) { if (!(IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING)) goto out; IEEE80211_SKB_CB(skb)->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING; } else { if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags))) goto out; skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func); } if (!skb) goto out; spin_unlock_bh(&fq->lock); hdr = (struct ieee80211_hdr *)skb->data; info = IEEE80211_SKB_CB(skb); memset(&tx, 0, sizeof(tx)); __skb_queue_head_init(&tx.skbs); tx.local = local; tx.skb = skb; tx.sdata = vif_to_sdata(info->control.vif); if (txq->sta) { tx.sta = container_of(txq->sta, struct sta_info, sta); /* * Drop unicast frames to unauthorised stations unless they are * injected frames or EAPOL frames from the local station. */ if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) && ieee80211_is_data(hdr->frame_control) && !ieee80211_vif_is_mesh(&tx.sdata->vif) && tx.sdata->vif.type != NL80211_IFTYPE_OCB && !is_multicast_ether_addr(hdr->addr1) && !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) && (!(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) || !ieee80211_is_our_addr(tx.sdata, hdr->addr2, NULL)))) { I802_DEBUG_INC(local->tx_handlers_drop_unauth_port); ieee80211_free_txskb(&local->hw, skb); goto begin; } } /* * The key can be removed while the packet was queued, so need to call * this here to get the current key. 
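 * (The intermediate TXQs decouple enqueue from transmit: anything
 * validated when the frame was queued, such as the key selected here,
 * may have changed by the time the frame is dequeued, so it has to be
 * re-resolved on this side.)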
*/ info->control.hw_key = NULL; r = ieee80211_tx_h_select_key(&tx); if (r != TX_CONTINUE) { ieee80211_free_txskb(&local->hw, skb); goto begin; } if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) info->flags |= (IEEE80211_TX_CTL_AMPDU | IEEE80211_TX_CTL_DONTFRAG); if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { r = ieee80211_tx_h_rate_ctrl(&tx); if (r != TX_CONTINUE) { ieee80211_free_txskb(&local->hw, skb); goto begin; } } goto encap_out; } if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { struct sta_info *sta = container_of(txq->sta, struct sta_info, sta); u8 pn_offs = 0; if (tx.key && (tx.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) pn_offs = ieee80211_hdrlen(hdr->frame_control); r = ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs, tx.key, &tx); if (r != TX_CONTINUE) { ieee80211_free_txskb(&local->hw, skb); goto begin; } } else { if (invoke_tx_handlers_late(&tx)) goto begin; skb = __skb_dequeue(&tx.skbs); info = IEEE80211_SKB_CB(skb); if (!skb_queue_empty(&tx.skbs)) { spin_lock_bh(&fq->lock); skb_queue_splice_tail(&tx.skbs, &txqi->frags); spin_unlock_bh(&fq->lock); } } if (skb_has_frag_list(skb) && !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) { if (skb_linearize(skb)) { ieee80211_free_txskb(&local->hw, skb); goto begin; } } switch (tx.sdata->vif.type) { case NL80211_IFTYPE_MONITOR: if ((tx.sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) || ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) { vif = &tx.sdata->vif; break; } tx.sdata = rcu_dereference(local->monitor_sdata); if (tx.sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) { vif = &tx.sdata->vif; info->hw_queue = vif->hw_queue[skb_get_queue_mapping(skb)]; } else if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) { ieee80211_free_txskb(&local->hw, skb); goto begin; } else { info->control.vif = NULL; return skb; } break; case NL80211_IFTYPE_AP_VLAN: tx.sdata = container_of(tx.sdata->bss, struct ieee80211_sub_if_data, u.ap); fallthrough; default: vif = &tx.sdata->vif; break; } encap_out: info->control.vif = vif; if (tx.sta && wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) { bool ampdu = txq->ac != IEEE80211_AC_VO; u32 airtime; airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta, skb->len, ampdu); if (airtime) { airtime = ieee80211_info_set_tx_time_est(info, airtime); ieee80211_sta_update_pending_airtime(local, tx.sta, txq->ac, airtime, false); } } return skb; out: spin_unlock_bh(&fq->lock); return skb; } EXPORT_SYMBOL(ieee80211_tx_dequeue); static inline s32 ieee80211_sta_deficit(struct sta_info *sta, u8 ac) { struct airtime_info *air_info = &sta->airtime[ac]; return air_info->deficit - atomic_read(&air_info->aql_tx_pending); } static void ieee80211_txq_set_active(struct txq_info *txqi) { struct sta_info *sta; if (!txqi->txq.sta) return; sta = container_of(txqi->txq.sta, struct sta_info, sta); sta->airtime[txqi->txq.ac].last_active = jiffies; } static bool ieee80211_txq_keep_active(struct txq_info *txqi) { struct sta_info *sta; if (!txqi->txq.sta) return false; sta = container_of(txqi->txq.sta, struct sta_info, sta); if (ieee80211_sta_deficit(sta, txqi->txq.ac) >= 0) return false; return ieee80211_sta_keep_active(sta, txqi->txq.ac); } struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_txq *ret = NULL; struct txq_info *txqi = NULL, *head = NULL; bool found_eligible_txq = false; spin_lock_bh(&local->active_txq_lock[ac]); if 
(!local->schedule_round[ac]) goto out; begin: txqi = list_first_entry_or_null(&local->active_txqs[ac], struct txq_info, schedule_order); if (!txqi) goto out; if (txqi == head) { if (!found_eligible_txq) goto out; else found_eligible_txq = false; } if (!head) head = txqi; if (txqi->txq.sta) { struct sta_info *sta = container_of(txqi->txq.sta, struct sta_info, sta); bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq); s32 deficit = ieee80211_sta_deficit(sta, txqi->txq.ac); if (aql_check) found_eligible_txq = true; if (deficit < 0) sta->airtime[txqi->txq.ac].deficit += sta->airtime_weight; if (deficit < 0 || !aql_check) { list_move_tail(&txqi->schedule_order, &local->active_txqs[txqi->txq.ac]); goto begin; } } if (txqi->schedule_round == local->schedule_round[ac]) goto out; list_del_init(&txqi->schedule_order); txqi->schedule_round = local->schedule_round[ac]; ret = &txqi->txq; out: spin_unlock_bh(&local->active_txq_lock[ac]); return ret; } EXPORT_SYMBOL(ieee80211_next_txq); void __ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, bool force) { struct ieee80211_local *local = hw_to_local(hw); struct txq_info *txqi = to_txq_info(txq); bool has_queue; spin_lock_bh(&local->active_txq_lock[txq->ac]); has_queue = force || (!test_bit(IEEE80211_TXQ_STOP, &txqi->flags) && txq_has_queue(txq)); if (list_empty(&txqi->schedule_order) && (has_queue || ieee80211_txq_keep_active(txqi))) { /* If airtime accounting is active, always enqueue STAs at the * head of the list to ensure that they only get moved to the * back by the airtime DRR scheduler once they have a negative * deficit. A station that already has a negative deficit will * get immediately moved to the back of the list on the next * call to ieee80211_next_txq(). */ if (txqi->txq.sta && local->airtime_flags && has_queue && wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) list_add(&txqi->schedule_order, &local->active_txqs[txq->ac]); else list_add_tail(&txqi->schedule_order, &local->active_txqs[txq->ac]); if (has_queue) ieee80211_txq_set_active(txqi); } spin_unlock_bh(&local->active_txq_lock[txq->ac]); } EXPORT_SYMBOL(__ieee80211_schedule_txq); DEFINE_STATIC_KEY_FALSE(aql_disable); bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct sta_info *sta; struct ieee80211_local *local = hw_to_local(hw); if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) return true; if (static_branch_unlikely(&aql_disable)) return true; if (!txq->sta) return true; if (unlikely(txq->tid == IEEE80211_NUM_TIDS)) return true; sta = container_of(txq->sta, struct sta_info, sta); if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) < sta->airtime[txq->ac].aql_limit_low) return true; if (atomic_read(&local->aql_total_pending_airtime) < local->aql_threshold && atomic_read(&sta->airtime[txq->ac].aql_tx_pending) < sta->airtime[txq->ac].aql_limit_high) return true; return false; } EXPORT_SYMBOL(ieee80211_txq_airtime_check); static bool ieee80211_txq_schedule_airtime_check(struct ieee80211_local *local, u8 ac) { unsigned int num_txq = 0; struct txq_info *txq; u32 aql_limit; if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) return true; list_for_each_entry(txq, &local->active_txqs[ac], schedule_order) num_txq++; aql_limit = (num_txq - 1) * local->aql_txq_limit_low[ac] / 2 + local->aql_txq_limit_high[ac]; return atomic_read(&local->aql_ac_pending_airtime[ac]) < aql_limit; } bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, struct 
ieee80211_txq *txq) { struct ieee80211_local *local = hw_to_local(hw); struct txq_info *iter, *tmp, *txqi = to_txq_info(txq); struct sta_info *sta; u8 ac = txq->ac; spin_lock_bh(&local->active_txq_lock[ac]); if (!txqi->txq.sta) goto out; if (list_empty(&txqi->schedule_order)) goto out; if (!ieee80211_txq_schedule_airtime_check(local, ac)) goto out; list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac], schedule_order) { if (iter == txqi) break; if (!iter->txq.sta) { list_move_tail(&iter->schedule_order, &local->active_txqs[ac]); continue; } sta = container_of(iter->txq.sta, struct sta_info, sta); if (ieee80211_sta_deficit(sta, ac) < 0) sta->airtime[ac].deficit += sta->airtime_weight; list_move_tail(&iter->schedule_order, &local->active_txqs[ac]); } sta = container_of(txqi->txq.sta, struct sta_info, sta); if (sta->airtime[ac].deficit >= 0) goto out; sta->airtime[ac].deficit += sta->airtime_weight; list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]); spin_unlock_bh(&local->active_txq_lock[ac]); return false; out: if (!list_empty(&txqi->schedule_order)) list_del_init(&txqi->schedule_order); spin_unlock_bh(&local->active_txq_lock[ac]); return true; } EXPORT_SYMBOL(ieee80211_txq_may_transmit); void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) { struct ieee80211_local *local = hw_to_local(hw); spin_lock_bh(&local->active_txq_lock[ac]); if (ieee80211_txq_schedule_airtime_check(local, ac)) { local->schedule_round[ac]++; if (!local->schedule_round[ac]) local->schedule_round[ac]++; } else { local->schedule_round[ac] = 0; } spin_unlock_bh(&local->active_txq_lock[ac]); } EXPORT_SYMBOL(ieee80211_txq_schedule_start); void __ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev, u32 info_flags, u32 ctrl_flags, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct sk_buff *next; int len = skb->len; if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) { kfree_skb(skb); return; } sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); rcu_read_lock(); if (ieee80211_vif_is_mesh(&sdata->vif) && ieee80211_hw_check(&local->hw, SUPPORT_FAST_XMIT) && ieee80211_mesh_xmit_fast(sdata, skb, ctrl_flags)) goto out; if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) goto out_free; if (IS_ERR(sta)) sta = NULL; skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, sta, skb)); ieee80211_aggr_check(sdata, sta, skb); if (sta) { struct ieee80211_fast_tx *fast_tx; fast_tx = rcu_dereference(sta->fast_tx); if (fast_tx && ieee80211_xmit_fast(sdata, sta, fast_tx, skb)) goto out; } /* the frame could be fragmented, software-encrypted, and other * things so we cannot really handle checksum or GSO offload. * fix it up in software before we handle anything else. 
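 * For example, a GSO skb gets segmented in software here, which is
 * why the loop below walks the resulting list with
 * skb_list_walk_safe() and builds one 802.11 header per segment.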
*/ skb = ieee80211_tx_skb_fixup(skb, 0); if (!skb) { len = 0; goto out; } skb_list_walk_safe(skb, skb, next) { skb_mark_not_on_list(skb); if (skb->protocol == sdata->control_port_protocol) ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, ctrl_flags, cookie); if (IS_ERR(skb)) { kfree_skb_list(next); goto out; } dev_sw_netstats_tx_add(dev, 1, skb->len); ieee80211_xmit(sdata, sta, skb); } goto out; out_free: kfree_skb(skb); len = 0; out: if (len) ieee80211_tpt_led_trig_tx(local, len); rcu_read_unlock(); } static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta) { struct ethhdr *eth; int err; err = skb_ensure_writable(skb, ETH_HLEN); if (unlikely(err)) return err; eth = (void *)skb->data; ether_addr_copy(eth->h_dest, sta->sta.addr); return 0; } static bool ieee80211_multicast_to_unicast(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); const struct ethhdr *eth = (void *)skb->data; const struct vlan_ethhdr *ethvlan = (void *)skb->data; __be16 ethertype; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: if (sdata->u.vlan.sta) return false; if (sdata->wdev.use_4addr) return false; fallthrough; case NL80211_IFTYPE_AP: /* check runtime toggle for this bss */ if (!sdata->bss->multicast_to_unicast) return false; break; default: return false; } /* multicast to unicast conversion only for some payload */ ethertype = eth->h_proto; if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN) ethertype = ethvlan->h_vlan_encapsulated_proto; switch (ethertype) { case htons(ETH_P_ARP): case htons(ETH_P_IP): case htons(ETH_P_IPV6): break; default: return false; } return true; } static void ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev, struct sk_buff_head *queue) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; const struct ethhdr *eth = (struct ethhdr *)skb->data; struct sta_info *sta, *first = NULL; struct sk_buff *cloned_skb; rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata) /* AP-VLAN mismatch */ continue; if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr))) /* do not send back to source */ continue; if (!first) { first = sta; continue; } cloned_skb = skb_clone(skb, GFP_ATOMIC); if (!cloned_skb) goto multicast; if (unlikely(ieee80211_change_da(cloned_skb, sta))) { dev_kfree_skb(cloned_skb); goto multicast; } __skb_queue_tail(queue, cloned_skb); } if (likely(first)) { if (unlikely(ieee80211_change_da(skb, first))) goto multicast; __skb_queue_tail(queue, skb); } else { /* no STA connected, drop */ kfree_skb(skb); skb = NULL; } goto out; multicast: __skb_queue_purge(queue); __skb_queue_tail(queue, skb); out: rcu_read_unlock(); } static void ieee80211_mlo_multicast_tx_one(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 ctrl_flags, unsigned int link_id) { struct sk_buff *out; out = skb_copy(skb, GFP_ATOMIC); if (!out) return; ctrl_flags |= u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK); __ieee80211_subif_start_xmit(out, sdata->dev, 0, ctrl_flags, NULL); } static void ieee80211_mlo_multicast_tx(struct net_device *dev, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); unsigned long links = sdata->vif.active_links; unsigned int link; u32 ctrl_flags = IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX; if (hweight16(links) == 1) { ctrl_flags |= u32_encode_bits(__ffs(links), 
IEEE80211_TX_CTRL_MLO_LINK); __ieee80211_subif_start_xmit(skb, sdata->dev, 0, ctrl_flags, NULL); return; } for_each_set_bit(link, &links, IEEE80211_MLD_MAX_NUM_LINKS) { ieee80211_mlo_multicast_tx_one(sdata, skb, ctrl_flags, link); ctrl_flags = 0; } kfree_skb(skb); } /** * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs * @skb: packet to be sent * @dev: incoming interface * * On failure skb will be freed. * * Returns: the netdev TX status (but really only %NETDEV_TX_OK) */ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); const struct ethhdr *eth = (void *)skb->data; if (likely(!is_multicast_ether_addr(eth->h_dest))) goto normal; if (unlikely(!ieee80211_sdata_running(sdata))) { kfree_skb(skb); return NETDEV_TX_OK; } if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) { struct sk_buff_head queue; __skb_queue_head_init(&queue); ieee80211_convert_to_unicast(skb, dev, &queue); while ((skb = __skb_dequeue(&queue))) __ieee80211_subif_start_xmit(skb, dev, 0, IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL); } else if (ieee80211_vif_is_mld(&sdata->vif) && ((sdata->vif.type == NL80211_IFTYPE_AP && !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) || (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->wdev.use_4addr))) { ieee80211_mlo_multicast_tx(dev, skb); } else { normal: __ieee80211_subif_start_xmit(skb, dev, 0, IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL); } return NETDEV_TX_OK; } static bool __ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct sta_info *sta, bool txpending) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_control control = {}; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *pubsta = NULL; unsigned long flags; int q = info->hw_queue; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (local->queue_stop_reasons[q] || (!txpending && !skb_queue_empty(&local->pending[q]))) { if (txpending) skb_queue_head(&local->pending[q], skb); else skb_queue_tail(&local->pending[q], skb); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); return false; } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); if (sta && sta->uploaded) pubsta = &sta->sta; control.sta = pubsta; drv_tx(local, &control, skb); return true; } static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct sta_info *sta, bool txpending) { struct ieee80211_local *local = sdata->local; struct sk_buff *next; bool ret = true; if (ieee80211_queue_skb(local, sdata, sta, skb)) return true; skb_list_walk_safe(skb, skb, next) { skb_mark_not_on_list(skb); if (!__ieee80211_tx_8023(sdata, skb, sta, txpending)) ret = false; } return ret; } static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, struct net_device *dev, struct sta_info *sta, struct ieee80211_key *key, struct sk_buff *skb) { struct ieee80211_tx_info *info; struct ieee80211_local *local = sdata->local; struct tid_ampdu_tx *tid_tx; struct sk_buff *seg, *next; unsigned int skbs = 0, len = 0; u16 queue; u8 tid; queue = ieee80211_select_queue(sdata, sta, skb); skb_set_queue_mapping(skb, queue); if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) && test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) goto out_free; skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return; ieee80211_aggr_check(sdata, sta, skb); tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; tid_tx = 
rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); if (tid_tx) { if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { /* fall back to non-offload slow path */ __ieee80211_subif_start_xmit(skb, dev, 0, IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL); return; } if (tid_tx->timeout) tid_tx->last_tx = jiffies; } skb = ieee80211_tx_skb_fixup(skb, ieee80211_sdata_netdev_features(sdata)); if (!skb) return; info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); info->hw_queue = sdata->vif.hw_queue[queue]; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP; info->control.vif = &sdata->vif; if (key) info->control.hw_key = &key->conf; skb_list_walk_safe(skb, seg, next) { skbs++; len += seg->len; if (seg != skb) memcpy(IEEE80211_SKB_CB(seg), info, sizeof(*info)); } if (unlikely(sk_requests_wifi_status(skb->sk))) { info->status_data = ieee80211_store_ack_skb(local, skb, &info->flags, NULL); if (info->status_data) info->status_data_idr = 1; } dev_sw_netstats_tx_add(dev, skbs, len); sta->deflink.tx_stats.packets[queue] += skbs; sta->deflink.tx_stats.bytes[queue] += len; ieee80211_tpt_led_trig_tx(local, len); ieee80211_tx_8023(sdata, skb, sta, false); return; out_free: kfree_skb(skb); } netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ethhdr *ehdr = (struct ethhdr *)skb->data; struct ieee80211_key *key; struct sta_info *sta; if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) { kfree_skb(skb); return NETDEV_TX_OK; } rcu_read_lock(); if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) { kfree_skb(skb); goto out; } if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded || !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || sdata->control_port_protocol == ehdr->h_proto)) goto skip_offload; key = rcu_dereference(sta->ptk[sta->ptk_idx]); if (!key) key = rcu_dereference(sdata->default_unicast_key); if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) || key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)) goto skip_offload; sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift); ieee80211_8023_xmit(sdata, dev, sta, key, skb); goto out; skip_offload: ieee80211_subif_start_xmit(skb, dev); out: rcu_read_unlock(); return NETDEV_TX_OK; } struct sk_buff * ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 info_flags) { struct ieee80211_hdr *hdr; struct ieee80211_tx_data tx = { .local = sdata->local, .sdata = sdata, }; struct sta_info *sta; rcu_read_lock(); if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) { kfree_skb(skb); skb = ERR_PTR(-EINVAL); goto out; } skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL); if (IS_ERR(skb)) goto out; hdr = (void *)skb->data; tx.sta = sta_info_get(sdata, hdr->addr1); tx.skb = skb; if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) { rcu_read_unlock(); kfree_skb(skb); return ERR_PTR(-EINVAL); } out: rcu_read_unlock(); return skb; } /* * ieee80211_clear_tx_pending may not be called in a context where * it is possible that packets could come in again.
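 * Otherwise a racing transmit path could re-add frames to
 * local->pending while it is being drained below.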
*/ void ieee80211_clear_tx_pending(struct ieee80211_local *local) { struct sk_buff *skb; int i; for (i = 0; i < local->hw.queues; i++) { while ((skb = skb_dequeue(&local->pending[i])) != NULL) ieee80211_free_txskb(&local->hw, skb); } } /* * Returns false if the frame couldn't be transmitted but was queued instead, * which in this case means re-queued -- take as an indication to stop sending * more pending frames. */ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sub_if_data *sdata; struct sta_info *sta; struct ieee80211_hdr *hdr; bool result; struct ieee80211_chanctx_conf *chanctx_conf; sdata = vif_to_sdata(info->control.vif); if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) { /* update band only for non-MLD */ if (!ieee80211_vif_is_mld(&sdata->vif)) { chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (unlikely(!chanctx_conf)) { dev_kfree_skb(skb); return true; } info->band = chanctx_conf->def.chan->band; } result = ieee80211_tx(sdata, NULL, skb, true); } else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) { dev_kfree_skb(skb); return true; } if (IS_ERR(sta) || (sta && !sta->uploaded)) sta = NULL; result = ieee80211_tx_8023(sdata, skb, sta, true); } else { struct sk_buff_head skbs; __skb_queue_head_init(&skbs); __skb_queue_tail(&skbs, skb); hdr = (struct ieee80211_hdr *)skb->data; sta = sta_info_get(sdata, hdr->addr1); result = __ieee80211_tx(local, &skbs, sta, true); } return result; } /* * Transmit all pending packets. Called from the tasklet. */ void ieee80211_tx_pending(struct tasklet_struct *t) { struct ieee80211_local *local = from_tasklet(local, t, tx_pending_tasklet); unsigned long flags; int i; bool txok; rcu_read_lock(); spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < local->hw.queues; i++) { /* * If the queue is stopped for some reason other than pending * frames, or we have no pending frames, proceed to the next queue. */ if (local->queue_stop_reasons[i] || skb_queue_empty(&local->pending[i])) continue; while (!skb_queue_empty(&local->pending[i])) { struct sk_buff *skb = __skb_dequeue(&local->pending[i]); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (WARN_ON(!info->control.vif)) { ieee80211_free_txskb(&local->hw, skb); continue; } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); txok = ieee80211_tx_pending_skb(local, skb); spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (!txok) break; } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); rcu_read_unlock(); } /* functions for drivers to get certain frames */ static void ieee80211_beacon_add_tim_pvb(struct ps_data *ps, struct sk_buff *skb, bool mcast_traffic) { int i, n1 = 0, n2; /* * Find largest even number N1 so that bits numbered 1 through * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits * (N2 + 1) x 8 through 2007 are 0. */ for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { if (ps->tim[i]) { n1 = i & 0xfe; break; } } n2 = n1; for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { if (ps->tim[i]) { n2 = i; break; } } /* Bitmap control */ skb_put_u8(skb, n1 | mcast_traffic); /* Part Virt Bitmap */ skb_put_data(skb, ps->tim + n1, n2 - n1 + 1); } /* * mac80211 currently supports encoding using block bitmap mode, non-inverted. The current implementation supports up to 1600 AIDs.
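 * (1600 AIDs is 25 blocks of 64 AIDs each. As a worked example,
 * assume only AID 80 has buffered traffic: AID 80 lives in octet 10
 * of the TIM bitmap, i.e. block 1, subblock 2, so the encoder below
 * emits the single encoded block 0x08 0x04 0x01: a block control
 * byte with offset 1, a block bitmap with subblock 2 present, and
 * one subblock octet with bit 0 set.)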
* * Block bitmap encoding breaks down the AID bitmap into blocks of 64 * AIDs. Each block contains between 0 and 8 subblocks. Each subblock * describes 8 AIDs and the presence of a subblock is determined by * the block bitmap. */ static void ieee80211_s1g_beacon_add_tim_pvb(struct ps_data *ps, struct sk_buff *skb, bool mcast_traffic) { int blk; /* * Emit a bitmap control block with a page slice number of 31 and a * page index of 0 which indicates as per IEEE80211-2024 9.4.2.5.1 * that the entire page (2048 bits) indicated by the page index * is encoded in the partial virtual bitmap. */ skb_put_u8(skb, mcast_traffic | (31 << 1)); /* Emit an encoded block for each block with non-zero subblocks */ for (blk = 0; blk < IEEE80211_MAX_SUPPORTED_S1G_TIM_BLOCKS; blk++) { u8 blk_bmap = 0; int sblk; for (sblk = 0; sblk < 8; sblk++) { int sblk_idx = blk * 8 + sblk; /* * If the current subblock is non-zero, mark it as * present in the block bitmap. */ if (ps->tim[sblk_idx]) blk_bmap |= BIT(sblk); } /* If the current block contains no non-zero subblocks */ if (!blk_bmap) continue; /* * Emit a block control byte for the current encoded block * with an encoding mode of block bitmap (0x0), not inverse * (0x0) and the current block offset (5 bits) */ skb_put_u8(skb, blk << 3); /* * Emit the block bitmap for the current encoded block which * contains the present subblocks. */ skb_put_u8(skb, blk_bmap); /* Emit the present subblocks */ for (sblk = 0; sblk < 8; sblk++) { int sblk_idx = blk * 8 + sblk; if (!(blk_bmap & BIT(sblk))) continue; skb_put_u8(skb, ps->tim[sblk_idx]); } } } static void __ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, struct ps_data *ps, struct sk_buff *skb, bool is_template) { struct element *tim; bool mcast_traffic = false, have_bits = false; struct ieee80211_bss_conf *link_conf = link->conf; bool s1g = ieee80211_get_link_sband(link)->band == NL80211_BAND_S1GHZ; /* Generate bitmap for TIM only if there are any STAs in power save * mode. */ if (atomic_read(&ps->num_sta_ps) > 0) /* in the hope that this is faster than * checking byte-for-byte */ have_bits = !bitmap_empty((unsigned long *)ps->tim, IEEE80211_MAX_AID + 1); if (!is_template) { if (ps->dtim_count == 0) ps->dtim_count = link_conf->dtim_period - 1; else ps->dtim_count--; } /* Length is set after parsing the AID bitmap */ tim = skb_put(skb, sizeof(struct element)); tim->id = WLAN_EID_TIM; skb_put_u8(skb, ps->dtim_count); skb_put_u8(skb, link_conf->dtim_period); if (ps->dtim_count == 0 && !skb_queue_empty(&ps->bc_buf)) mcast_traffic = true; ps->dtim_bc_mc = mcast_traffic; if (have_bits) { if (s1g) ieee80211_s1g_beacon_add_tim_pvb(ps, skb, mcast_traffic); else ieee80211_beacon_add_tim_pvb(ps, skb, mcast_traffic); } else { /* * If there is no buffered unicast traffic for an S1G * interface, we can exclude the bitmap control. This is in * contrast to other phy types as they do include the bitmap * control and pvb even when there is no buffered traffic. */ if (!s1g) { /* Bitmap control */ skb_put_u8(skb, mcast_traffic); /* Part Virt Bitmap */ skb_put_u8(skb, 0); } } tim->datalen = skb_tail_pointer(skb) - tim->data; } static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, struct ps_data *ps, struct sk_buff *skb, bool is_template) { struct ieee80211_local *local = sdata->local; /* * Not very nice, but we want to allow the driver to call * ieee80211_beacon_get() as a response to the set_tim() * callback.
That, however, is already invoked under the * sta_lock to guarantee consistent and race-free update * of the tim bitmap in mac80211 and the driver. */ if (local->tim_in_locked_section) { __ieee80211_beacon_add_tim(sdata, link, ps, skb, is_template); } else { spin_lock_bh(&local->tim_lock); __ieee80211_beacon_add_tim(sdata, link, ps, skb, is_template); spin_unlock_bh(&local->tim_lock); } return 0; } static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata, struct beacon_data *beacon, struct ieee80211_link_data *link) { u8 *beacon_data, count, max_count = 1; struct probe_resp *resp; size_t beacon_data_len; u16 *bcn_offsets; int i; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: beacon_data = beacon->tail; beacon_data_len = beacon->tail_len; break; case NL80211_IFTYPE_ADHOC: beacon_data = beacon->head; beacon_data_len = beacon->head_len; break; case NL80211_IFTYPE_MESH_POINT: beacon_data = beacon->head; beacon_data_len = beacon->head_len; break; default: return; } resp = rcu_dereference(link->u.ap.probe_resp); bcn_offsets = beacon->cntdwn_counter_offsets; count = beacon->cntdwn_current_counter; if (link->conf->csa_active) max_count = IEEE80211_MAX_CNTDWN_COUNTERS_NUM; for (i = 0; i < max_count; ++i) { if (bcn_offsets[i]) { if (WARN_ON_ONCE(bcn_offsets[i] >= beacon_data_len)) return; beacon_data[bcn_offsets[i]] = count; } if (sdata->vif.type == NL80211_IFTYPE_AP && resp) { u16 *resp_offsets = resp->cntdwn_counter_offsets; resp->data[resp_offsets[i]] = count; } } } static u8 __ieee80211_beacon_update_cntdwn(struct ieee80211_link_data *link, struct beacon_data *beacon) { if (beacon->cntdwn_current_counter == 1) { /* * Channel switch handling is done by a worker thread while * beacons get pulled from hardware timers. It's therefore * possible that software threads are slow enough to not be * able to complete CSA handling in a single beacon interval, * in which case we get here. There isn't much to do about * it, other than letting the user know that the AP isn't * behaving correctly. 
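 * In that case the counter is held at 1 and the error below is
 * logged once; the countdown is never allowed to go below 1 from
 * here.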
*/ link_err_once(link, "beacon TX faster than countdown (channel/color switch) completion\n"); return 0; } beacon->cntdwn_current_counter--; return beacon->cntdwn_current_counter; } u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_link_data *link; struct beacon_data *beacon = NULL; u8 count = 0; if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS)) return 0; rcu_read_lock(); link = rcu_dereference(sdata->link[link_id]); if (!link) goto unlock; if (sdata->vif.type == NL80211_IFTYPE_AP) beacon = rcu_dereference(link->u.ap.beacon); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) beacon = rcu_dereference(sdata->u.ibss.presp); else if (ieee80211_vif_is_mesh(&sdata->vif)) beacon = rcu_dereference(sdata->u.mesh.beacon); if (!beacon) goto unlock; count = __ieee80211_beacon_update_cntdwn(link, beacon); unlock: rcu_read_unlock(); return count; } EXPORT_SYMBOL(ieee80211_beacon_update_cntdwn); void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct beacon_data *beacon = NULL; rcu_read_lock(); if (sdata->vif.type == NL80211_IFTYPE_AP) beacon = rcu_dereference(sdata->deflink.u.ap.beacon); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) beacon = rcu_dereference(sdata->u.ibss.presp); else if (ieee80211_vif_is_mesh(&sdata->vif)) beacon = rcu_dereference(sdata->u.mesh.beacon); if (!beacon) goto unlock; if (counter < beacon->cntdwn_current_counter) beacon->cntdwn_current_counter = counter; unlock: rcu_read_unlock(); } EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn); bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif, unsigned int link_id) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_link_data *link; struct beacon_data *beacon = NULL; u8 *beacon_data; size_t beacon_data_len; int ret = false; if (!ieee80211_sdata_running(sdata)) return false; if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS)) return 0; rcu_read_lock(); link = rcu_dereference(sdata->link[link_id]); if (!link) goto out; if (vif->type == NL80211_IFTYPE_AP) { beacon = rcu_dereference(link->u.ap.beacon); if (WARN_ON(!beacon || !beacon->tail)) goto out; beacon_data = beacon->tail; beacon_data_len = beacon->tail_len; } else if (vif->type == NL80211_IFTYPE_ADHOC) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; beacon = rcu_dereference(ifibss->presp); if (!beacon) goto out; beacon_data = beacon->head; beacon_data_len = beacon->head_len; } else if (vif->type == NL80211_IFTYPE_MESH_POINT) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; beacon = rcu_dereference(ifmsh->beacon); if (!beacon) goto out; beacon_data = beacon->head; beacon_data_len = beacon->head_len; } else { WARN_ON(1); goto out; } if (!beacon->cntdwn_counter_offsets[0]) goto out; if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[0] > beacon_data_len)) goto out; if (beacon_data[beacon->cntdwn_counter_offsets[0]] == 1) ret = true; out: rcu_read_unlock(); return ret; } EXPORT_SYMBOL(ieee80211_beacon_cntdwn_is_complete); static int ieee80211_beacon_protect(struct sk_buff *skb, struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link) { ieee80211_tx_result res; struct ieee80211_tx_data tx; struct sk_buff *check_skb; memset(&tx, 0, sizeof(tx)); tx.key = rcu_dereference(link->default_beacon_key); if (!tx.key) return 0; if (unlikely(tx.key->flags & KEY_FLAG_TAINTED)) { tx.key = NULL; return -EINVAL; } if 
(!(tx.key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT_TX) && tx.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) IEEE80211_SKB_CB(skb)->control.hw_key = &tx.key->conf; tx.local = local; tx.sdata = sdata; __skb_queue_head_init(&tx.skbs); __skb_queue_tail(&tx.skbs, skb); res = ieee80211_tx_h_encrypt(&tx); check_skb = __skb_dequeue(&tx.skbs); /* we may crash after this, but it'd be a bug in crypto */ WARN_ON(check_skb != skb); if (WARN_ON_ONCE(res != TX_CONTINUE)) return -EINVAL; return 0; } static void ieee80211_beacon_get_finish(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_data *link, struct ieee80211_mutable_offsets *offs, struct beacon_data *beacon, struct sk_buff *skb, struct ieee80211_chanctx_conf *chanctx_conf, u16 csa_off_base) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_tx_info *info; enum nl80211_band band; struct ieee80211_tx_rate_control txrc; /* CSA offsets */ if (offs && beacon) { u16 i; for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) { u16 csa_off = beacon->cntdwn_counter_offsets[i]; if (!csa_off) continue; offs->cntdwn_counter_offs[i] = csa_off_base + csa_off; } } band = chanctx_conf->def.chan->band; info = IEEE80211_SKB_CB(skb); info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; info->flags |= IEEE80211_TX_CTL_NO_ACK; info->band = band; memset(&txrc, 0, sizeof(txrc)); txrc.hw = hw; txrc.sband = local->hw.wiphy->bands[band]; txrc.bss_conf = link->conf; txrc.skb = skb; txrc.reported_rate.idx = -1; if (sdata->beacon_rate_set && sdata->beacon_rateidx_mask[band]) txrc.rate_idx_mask = sdata->beacon_rateidx_mask[band]; else txrc.rate_idx_mask = sdata->rc_rateidx_mask[band]; txrc.bss = true; rate_control_get_rate(sdata, NULL, &txrc); info->control.vif = vif; info->control.flags |= u32_encode_bits(link->link_id, IEEE80211_TX_CTRL_MLO_LINK); info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_ASSIGN_SEQ | IEEE80211_TX_CTL_FIRST_FRAGMENT; } static void ieee80211_beacon_add_mbssid(struct sk_buff *skb, struct beacon_data *beacon, u8 i) { if (!beacon->mbssid_ies || !beacon->mbssid_ies->cnt || i > beacon->mbssid_ies->cnt) return; if (i < beacon->mbssid_ies->cnt) { skb_put_data(skb, beacon->mbssid_ies->elem[i].data, beacon->mbssid_ies->elem[i].len); if (beacon->rnr_ies && beacon->rnr_ies->cnt) { skb_put_data(skb, beacon->rnr_ies->elem[i].data, beacon->rnr_ies->elem[i].len); for (i = beacon->mbssid_ies->cnt; i < beacon->rnr_ies->cnt; i++) skb_put_data(skb, beacon->rnr_ies->elem[i].data, beacon->rnr_ies->elem[i].len); } return; } /* i == beacon->mbssid_ies->cnt, include all MBSSID elements */ for (i = 0; i < beacon->mbssid_ies->cnt; i++) skb_put_data(skb, beacon->mbssid_ies->elem[i].data, beacon->mbssid_ies->elem[i].len); } static struct sk_buff * __ieee80211_beacon_get_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_data *link, struct ieee80211_mutable_offsets *offs, bool is_template, struct beacon_data *beacon, struct ieee80211_chanctx_conf *chanctx_conf, u8 ema_index) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_ap *ap = &sdata->u.ap; struct sk_buff *skb = NULL; u16 csa_off_base = 0; int mbssid_len; if (beacon->cntdwn_counter_offsets[0]) { if (!is_template) ieee80211_beacon_update_cntdwn(vif, link->link_id); ieee80211_set_beacon_cntdwn(sdata, beacon, link); } /* headroom, head length, * tail length, maximum TIM length and multiple BSSID length */ mbssid_len = 
ieee80211_get_mbssid_beacon_len(beacon->mbssid_ies, beacon->rnr_ies, ema_index); skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + beacon->tail_len + 256 + local->hw.extra_beacon_tailroom + mbssid_len); if (!skb) return NULL; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, beacon->head, beacon->head_len); ieee80211_beacon_add_tim(sdata, link, &ap->ps, skb, is_template); if (offs) { offs->tim_offset = beacon->head_len; offs->tim_length = skb->len - beacon->head_len; offs->cntdwn_counter_offs[0] = beacon->cntdwn_counter_offsets[0]; if (mbssid_len) { ieee80211_beacon_add_mbssid(skb, beacon, ema_index); offs->mbssid_off = skb->len - mbssid_len; } /* for AP the csa offsets are from tail */ csa_off_base = skb->len; } if (beacon->tail) skb_put_data(skb, beacon->tail, beacon->tail_len); if (ieee80211_beacon_protect(skb, local, sdata, link) < 0) { dev_kfree_skb(skb); return NULL; } ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb, chanctx_conf, csa_off_base); return skb; } static bool ieee80211_s1g_need_long_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link) { struct ps_data *ps = &sdata->u.ap.ps; if (ps->sb_count == 0) ps->sb_count = link->conf->s1g_long_beacon_period - 1; else ps->sb_count--; return ps->sb_count == 0; } static struct sk_buff * ieee80211_s1g_short_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_data *link, struct ieee80211_chanctx_conf *chanctx_conf, struct s1g_short_beacon_data *sb, bool is_template) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_ap *ap = &sdata->u.ap; struct sk_buff *skb; skb = dev_alloc_skb(local->tx_headroom + sb->short_head_len + sb->short_tail_len + 256 + local->hw.extra_beacon_tailroom); if (!skb) return NULL; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, sb->short_head, sb->short_head_len); ieee80211_beacon_add_tim(sdata, link, &ap->ps, skb, is_template); if (sb->short_tail) skb_put_data(skb, sb->short_tail, sb->short_tail_len); ieee80211_beacon_get_finish(hw, vif, link, NULL, NULL, skb, chanctx_conf, 0); return skb; } static struct sk_buff * ieee80211_beacon_get_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_data *link, struct ieee80211_mutable_offsets *offs, bool is_template, struct beacon_data *beacon, struct ieee80211_chanctx_conf *chanctx_conf, u8 ema_index, struct s1g_short_beacon_data *s1g_sb) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (!sdata->vif.cfg.s1g || !s1g_sb || ieee80211_s1g_need_long_beacon(sdata, link)) return __ieee80211_beacon_get_ap(hw, vif, link, offs, is_template, beacon, chanctx_conf, ema_index); return ieee80211_s1g_short_beacon_get(hw, vif, link, chanctx_conf, s1g_sb, is_template); } static struct ieee80211_ema_beacons * ieee80211_beacon_get_ap_ema_list(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_data *link, struct ieee80211_mutable_offsets *offs, bool is_template, struct beacon_data *beacon, struct ieee80211_chanctx_conf *chanctx_conf) { struct ieee80211_ema_beacons *ema = NULL; if (!beacon->mbssid_ies || !beacon->mbssid_ies->cnt) return NULL; ema = kzalloc(struct_size(ema, bcn, beacon->mbssid_ies->cnt), GFP_ATOMIC); if (!ema) return NULL; for (ema->cnt = 0; ema->cnt < beacon->mbssid_ies->cnt; ema->cnt++) { ema->bcn[ema->cnt].skb = ieee80211_beacon_get_ap(hw, vif, link, &ema->bcn[ema->cnt].offs, is_template, beacon, chanctx_conf, ema->cnt, NULL); if 
(!ema->bcn[ema->cnt].skb) break; } if (ema->cnt == beacon->mbssid_ies->cnt) return ema; ieee80211_beacon_free_ema_list(ema); return NULL; } #define IEEE80211_INCLUDE_ALL_MBSSID_ELEMS -1 static struct sk_buff * __ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs, bool is_template, unsigned int link_id, int ema_index, struct ieee80211_ema_beacons **ema_beacons) { struct ieee80211_local *local = hw_to_local(hw); struct beacon_data *beacon = NULL; struct sk_buff *skb = NULL; struct ieee80211_sub_if_data *sdata = NULL; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_link_data *link; struct s1g_short_beacon_data *s1g_short_bcn = NULL; rcu_read_lock(); sdata = vif_to_sdata(vif); link = rcu_dereference(sdata->link[link_id]); if (!link) goto out; chanctx_conf = rcu_dereference(link->conf->chanctx_conf); if (!ieee80211_sdata_running(sdata) || !chanctx_conf) goto out; if (offs) memset(offs, 0, sizeof(*offs)); if (sdata->vif.type == NL80211_IFTYPE_AP) { beacon = rcu_dereference(link->u.ap.beacon); if (!beacon) goto out; if (vif->cfg.s1g && link->u.ap.s1g_short_beacon) { s1g_short_bcn = rcu_dereference(link->u.ap.s1g_short_beacon); if (!s1g_short_bcn) goto out; } if (ema_beacons) { *ema_beacons = ieee80211_beacon_get_ap_ema_list(hw, vif, link, offs, is_template, beacon, chanctx_conf); } else { if (beacon->mbssid_ies && beacon->mbssid_ies->cnt) { if (ema_index >= beacon->mbssid_ies->cnt) goto out; /* End of MBSSID elements */ if (ema_index <= IEEE80211_INCLUDE_ALL_MBSSID_ELEMS) ema_index = beacon->mbssid_ies->cnt; } else { ema_index = 0; } skb = ieee80211_beacon_get_ap(hw, vif, link, offs, is_template, beacon, chanctx_conf, ema_index, s1g_short_bcn); } } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_hdr *hdr; beacon = rcu_dereference(ifibss->presp); if (!beacon) goto out; if (beacon->cntdwn_counter_offsets[0]) { if (!is_template) __ieee80211_beacon_update_cntdwn(link, beacon); ieee80211_set_beacon_cntdwn(sdata, beacon, link); } skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + local->hw.extra_beacon_tailroom); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, beacon->head, beacon->head_len); hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb, chanctx_conf, 0); } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; beacon = rcu_dereference(ifmsh->beacon); if (!beacon) goto out; if (beacon->cntdwn_counter_offsets[0]) { if (!is_template) /* TODO: For mesh csa_counter is in TU, so * decrementing it by one isn't correct, but * for now we leave it consistent with overall * mac80211's behavior. 
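 * (For the other interface types the countdown is in units of
 * beacon intervals, so decrementing once per transmitted beacon is
 * exact; a TU-based counter would have to be decremented by the
 * beacon interval instead.)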
*/ __ieee80211_beacon_update_cntdwn(link, beacon); ieee80211_set_beacon_cntdwn(sdata, beacon, link); } if (ifmsh->sync_ops) ifmsh->sync_ops->adjust_tsf(sdata, beacon); skb = dev_alloc_skb(local->tx_headroom + beacon->head_len + 256 + /* TIM IE */ beacon->tail_len + local->hw.extra_beacon_tailroom); if (!skb) goto out; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, beacon->head, beacon->head_len); ieee80211_beacon_add_tim(sdata, link, &ifmsh->ps, skb, is_template); if (offs) { offs->tim_offset = beacon->head_len; offs->tim_length = skb->len - beacon->head_len; } skb_put_data(skb, beacon->tail, beacon->tail_len); ieee80211_beacon_get_finish(hw, vif, link, offs, beacon, skb, chanctx_conf, 0); } else { WARN_ON(1); goto out; } out: rcu_read_unlock(); return skb; } struct sk_buff * ieee80211_beacon_get_template(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs, unsigned int link_id) { return __ieee80211_beacon_get(hw, vif, offs, true, link_id, IEEE80211_INCLUDE_ALL_MBSSID_ELEMS, NULL); } EXPORT_SYMBOL(ieee80211_beacon_get_template); struct sk_buff * ieee80211_beacon_get_template_ema_index(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs, unsigned int link_id, u8 ema_index) { return __ieee80211_beacon_get(hw, vif, offs, true, link_id, ema_index, NULL); } EXPORT_SYMBOL(ieee80211_beacon_get_template_ema_index); void ieee80211_beacon_free_ema_list(struct ieee80211_ema_beacons *ema_beacons) { u8 i; if (!ema_beacons) return; for (i = 0; i < ema_beacons->cnt; i++) kfree_skb(ema_beacons->bcn[i].skb); kfree(ema_beacons); } EXPORT_SYMBOL(ieee80211_beacon_free_ema_list); struct ieee80211_ema_beacons * ieee80211_beacon_get_template_ema_list(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id) { struct ieee80211_ema_beacons *ema_beacons = NULL; WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, true, link_id, 0, &ema_beacons)); return ema_beacons; } EXPORT_SYMBOL(ieee80211_beacon_get_template_ema_list); struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 *tim_offset, u16 *tim_length, unsigned int link_id) { struct ieee80211_mutable_offsets offs = {}; struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false, link_id, IEEE80211_INCLUDE_ALL_MBSSID_ELEMS, NULL); struct sk_buff *copy; if (!bcn) return bcn; if (tim_offset) *tim_offset = offs.tim_offset; if (tim_length) *tim_length = offs.tim_length; if (ieee80211_hw_check(hw, BEACON_TX_STATUS) || !hw_to_local(hw)->monitors) return bcn; /* send a copy to monitor interfaces */ copy = skb_copy(bcn, GFP_ATOMIC); if (!copy) return bcn; ieee80211_tx_monitor(hw_to_local(hw), copy, 1, NULL); return bcn; } EXPORT_SYMBOL(ieee80211_beacon_get_tim); struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; struct probe_resp *presp = NULL; struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (sdata->vif.type != NL80211_IFTYPE_AP) return NULL; rcu_read_lock(); presp = rcu_dereference(sdata->deflink.u.ap.probe_resp); if (!presp) goto out; skb = dev_alloc_skb(presp->len); if (!skb) goto out; skb_put_data(skb, presp->data, presp->len); hdr = (struct ieee80211_hdr *) skb->data; memset(hdr->addr1, 0, sizeof(hdr->addr1)); out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_proberesp_get); struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; 
struct fils_discovery_data *tmpl = NULL; struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (sdata->vif.type != NL80211_IFTYPE_AP) return NULL; rcu_read_lock(); tmpl = rcu_dereference(sdata->deflink.u.ap.fils_discovery); if (!tmpl) { rcu_read_unlock(); return NULL; } skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len); if (skb) { skb_reserve(skb, sdata->local->hw.extra_tx_headroom); skb_put_data(skb, tmpl->data, tmpl->len); } rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_get_fils_discovery_tmpl); struct sk_buff * ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; struct unsol_bcast_probe_resp_data *tmpl = NULL; struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (sdata->vif.type != NL80211_IFTYPE_AP) return NULL; rcu_read_lock(); tmpl = rcu_dereference(sdata->deflink.u.ap.unsol_bcast_probe_resp); if (!tmpl) { rcu_read_unlock(); return NULL; } skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len); if (skb) { skb_reserve(skb, sdata->local->hw.extra_tx_headroom); skb_put_data(skb, tmpl->data, tmpl->len); } rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_get_unsol_bcast_probe_resp_tmpl); struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata; struct ieee80211_pspoll *pspoll; struct ieee80211_local *local; struct sk_buff *skb; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return NULL; sdata = vif_to_sdata(vif); local = sdata->local; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll)); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); pspoll = skb_put_zero(skb, sizeof(*pspoll)); pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); pspoll->aid = cpu_to_le16(sdata->vif.cfg.aid); /* aid in PS-Poll has its two MSBs each set to 1 */ pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14); memcpy(pspoll->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); memcpy(pspoll->ta, vif->addr, ETH_ALEN); return skb; } EXPORT_SYMBOL(ieee80211_pspoll_get); struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int link_id, bool qos_ok) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_link_data *link = NULL; struct ieee80211_hdr_3addr *nullfunc; struct sk_buff *skb; bool qos = false; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return NULL; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc) + 2); if (!skb) return NULL; rcu_read_lock(); if (qos_ok) { struct sta_info *sta; sta = sta_info_get(sdata, vif->cfg.ap_addr); qos = sta && sta->sta.wme; } if (link_id >= 0) { link = rcu_dereference(sdata->link[link_id]); if (WARN_ON_ONCE(!link)) { rcu_read_unlock(); kfree_skb(skb); return NULL; } } skb_reserve(skb, local->hw.extra_tx_headroom); nullfunc = skb_put_zero(skb, sizeof(*nullfunc)); nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS); if (qos) { __le16 qoshdr = cpu_to_le16(7); BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC | IEEE80211_STYPE_NULLFUNC) != IEEE80211_STYPE_QOS_NULLFUNC); nullfunc->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC); skb->priority = 7; skb_set_queue_mapping(skb, IEEE80211_AC_VO); skb_put_data(skb, &qoshdr, sizeof(qoshdr)); } if (link) { memcpy(nullfunc->addr1, link->conf->bssid, ETH_ALEN); memcpy(nullfunc->addr2, link->conf->addr, 
ETH_ALEN); memcpy(nullfunc->addr3, link->conf->bssid, ETH_ALEN); } else { memcpy(nullfunc->addr1, vif->cfg.ap_addr, ETH_ALEN); memcpy(nullfunc->addr2, vif->addr, ETH_ALEN); memcpy(nullfunc->addr3, vif->cfg.ap_addr, ETH_ALEN); } rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_nullfunc_get); struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, const u8 *src_addr, const u8 *ssid, size_t ssid_len, size_t tailroom) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_hdr_3addr *hdr; struct sk_buff *skb; size_t ie_ssid_len; u8 *pos; ie_ssid_len = 2 + ssid_len; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) + ie_ssid_len + tailroom); if (!skb) return NULL; skb_reserve(skb, local->hw.extra_tx_headroom); hdr = skb_put_zero(skb, sizeof(*hdr)); hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(hdr->addr1); memcpy(hdr->addr2, src_addr, ETH_ALEN); eth_broadcast_addr(hdr->addr3); pos = skb_put(skb, ie_ssid_len); *pos++ = WLAN_EID_SSID; *pos++ = ssid_len; if (ssid_len) memcpy(pos, ssid, ssid_len); pos += ssid_len; return skb; } EXPORT_SYMBOL(ieee80211_probereq_get); void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_info *frame_txctl, struct ieee80211_rts *rts) { const struct ieee80211_hdr *hdr = frame; rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); rts->duration = ieee80211_rts_duration(hw, vif, frame_len, frame_txctl); memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); memcpy(rts->ta, hdr->addr2, sizeof(rts->ta)); } EXPORT_SYMBOL(ieee80211_rts_get); void ieee80211_ctstoself_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const void *frame, size_t frame_len, const struct ieee80211_tx_info *frame_txctl, struct ieee80211_cts *cts) { const struct ieee80211_hdr *hdr = frame; cts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS); cts->duration = ieee80211_ctstoself_duration(hw, vif, frame_len, frame_txctl); memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); } EXPORT_SYMBOL(ieee80211_ctstoself_get); struct sk_buff * ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ieee80211_local *local = hw_to_local(hw); struct sk_buff *skb = NULL; struct ieee80211_tx_data tx; struct ieee80211_sub_if_data *sdata; struct ps_data *ps; struct ieee80211_tx_info *info; struct ieee80211_chanctx_conf *chanctx_conf; sdata = vif_to_sdata(vif); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (!chanctx_conf) goto out; if (sdata->vif.type == NL80211_IFTYPE_AP) { struct beacon_data *beacon = rcu_dereference(sdata->deflink.u.ap.beacon); if (!beacon || !beacon->head) goto out; ps = &sdata->u.ap.ps; } else if (ieee80211_vif_is_mesh(&sdata->vif)) { ps = &sdata->u.mesh.ps; } else { goto out; } if (ps->dtim_count != 0 || !ps->dtim_bc_mc) goto out; /* send buffered bc/mc only after DTIM beacon */ while (1) { skb = skb_dequeue(&ps->bc_buf); if (!skb) goto out; local->total_ps_buffered--; if (!skb_queue_empty(&ps->bc_buf) && skb->len >= 2) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; /* more buffered multicast/broadcast frames ==> set * MoreData flag in IEEE 802.11 header to inform PS * STAs */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } if (sdata->vif.type == NL80211_IFTYPE_AP) sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) break; 
ieee80211_free_txskb(hw, skb); } info = IEEE80211_SKB_CB(skb); tx.flags |= IEEE80211_TX_PS_BUFFERED; info->band = chanctx_conf->def.chan->band; if (invoke_tx_handlers(&tx)) skb = NULL; out: rcu_read_unlock(); return skb; } EXPORT_SYMBOL(ieee80211_get_buffered_bc); int ieee80211_reserve_tid(struct ieee80211_sta *pubsta, u8 tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; int ret; u32 queues; lockdep_assert_wiphy(local->hw.wiphy); /* only some cases are supported right now */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: break; default: WARN_ON(1); return -EINVAL; } if (WARN_ON(tid >= IEEE80211_NUM_UPS)) return -EINVAL; if (sta->reserved_tid == tid) { ret = 0; goto out; } if (sta->reserved_tid != IEEE80211_TID_UNRESERVED) { sdata_err(sdata, "TID reservation already active\n"); ret = -EALREADY; goto out; } ieee80211_stop_vif_queues(sdata->local, sdata, IEEE80211_QUEUE_STOP_REASON_RESERVE_TID); synchronize_net(); /* Tear down BA sessions so we stop aggregating on this TID */ if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) { set_sta_flag(sta, WLAN_STA_BLOCK_BA); __ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_LOCAL_REQUEST); } queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]); __ieee80211_flush_queues(local, sdata, queues, false); sta->reserved_tid = tid; ieee80211_wake_vif_queues(local, sdata, IEEE80211_QUEUE_STOP_REASON_RESERVE_TID); if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ret = 0; out: return ret; } EXPORT_SYMBOL(ieee80211_reserve_tid); void ieee80211_unreserve_tid(struct ieee80211_sta *pubsta, u8 tid) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sub_if_data *sdata = sta->sdata; lockdep_assert_wiphy(sdata->local->hw.wiphy); /* only some cases are supported right now */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: break; default: WARN_ON(1); return; } if (tid != sta->reserved_tid) { sdata_err(sdata, "TID to unreserve (%d) isn't reserved\n", tid); return; } sta->reserved_tid = IEEE80211_TID_UNRESERVED; } EXPORT_SYMBOL(ieee80211_unreserve_tid); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, int link_id, enum nl80211_band band) { const struct ieee80211_hdr *hdr = (void *)skb->data; int ac = ieee80211_ac_from_tid(tid); unsigned int link; skb_reset_mac_header(skb); skb_set_queue_mapping(skb, ac); skb->priority = tid; skb->dev = sdata->dev; BUILD_BUG_ON(IEEE80211_LINK_UNSPECIFIED < IEEE80211_MLD_MAX_NUM_LINKS); BUILD_BUG_ON(!FIELD_FIT(IEEE80211_TX_CTRL_MLO_LINK, IEEE80211_LINK_UNSPECIFIED)); if (!ieee80211_vif_is_mld(&sdata->vif)) { link = 0; } else if (link_id >= 0) { link = link_id; } else if (memcmp(sdata->vif.addr, hdr->addr2, ETH_ALEN) == 0) { /* address from the MLD */ link = IEEE80211_LINK_UNSPECIFIED; } else { /* otherwise must be addressed from a link */ rcu_read_lock(); for (link = 0; link < ARRAY_SIZE(sdata->vif.link_conf); link++) { struct ieee80211_bss_conf *link_conf; link_conf = rcu_dereference(sdata->vif.link_conf[link]); if (!link_conf) continue; if (memcmp(link_conf->addr, hdr->addr2, ETH_ALEN) == 0) break; } rcu_read_unlock(); if (WARN_ON_ONCE(link == ARRAY_SIZE(sdata->vif.link_conf))) link = ffs(sdata->vif.active_links) - 1; } IEEE80211_SKB_CB(skb)->control.flags |= 
u32_encode_bits(link, IEEE80211_TX_CTRL_MLO_LINK); /* * The other path calling ieee80211_xmit is from the tasklet, * and while we can handle concurrent transmissions, the locking * requirement is that we do not come into tx with bhs on. */ local_bh_disable(); IEEE80211_SKB_CB(skb)->band = band; ieee80211_xmit(sdata, NULL, skb); local_bh_enable(); } void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, int link_id) { struct ieee80211_chanctx_conf *chanctx_conf; enum nl80211_band band; rcu_read_lock(); if (sdata->vif.type == NL80211_IFTYPE_NAN) { band = NUM_NL80211_BANDS; } else if (!ieee80211_vif_is_mld(&sdata->vif)) { WARN_ON(link_id >= 0); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); kfree_skb(skb); return; } band = chanctx_conf->def.chan->band; } else { WARN_ON(link_id >= 0 && !(sdata->vif.active_links & BIT(link_id))); /* MLD transmissions must not rely on the band */ band = 0; } __ieee80211_tx_skb_tid_band(sdata, skb, tid, link_id, band); rcu_read_unlock(); } int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len, const u8 *dest, __be16 proto, bool unencrypted, int link_id, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct sk_buff *skb; struct ethhdr *ehdr; u32 ctrl_flags = 0; u32 flags = 0; int err; /* mutex lock is only needed for incrementing the cookie counter */ lockdep_assert_wiphy(local->hw.wiphy); /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE * or Pre-Authentication */ if (proto != sdata->control_port_protocol && proto != cpu_to_be16(ETH_P_PREAUTH)) return -EINVAL; if (proto == sdata->control_port_protocol) ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO | IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; if (unencrypted) flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; if (cookie) ctrl_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX; skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(struct ethhdr) + len); if (!skb) return -ENOMEM; skb_reserve(skb, local->hw.extra_tx_headroom + sizeof(struct ethhdr)); skb_put_data(skb, buf, len); ehdr = skb_push(skb, sizeof(struct ethhdr)); memcpy(ehdr->h_dest, dest, ETH_ALEN); /* we may override the SA for MLO STA later */ if (link_id < 0) { ctrl_flags |= u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, IEEE80211_TX_CTRL_MLO_LINK); memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN); } else { struct ieee80211_bss_conf *link_conf; ctrl_flags |= u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK); rcu_read_lock(); link_conf = rcu_dereference(sdata->vif.link_conf[link_id]); if (!link_conf) { dev_kfree_skb(skb); rcu_read_unlock(); return -ENOLINK; } memcpy(ehdr->h_source, link_conf->addr, ETH_ALEN); rcu_read_unlock(); } ehdr->h_proto = proto; skb->dev = dev; skb->protocol = proto; skb_reset_network_header(skb); skb_reset_mac_header(skb); if (local->hw.queues < IEEE80211_NUM_ACS) goto start_xmit; /* update QoS header to prioritize control port frames if possible, * prioritization also happens for control port frames sent over * AF_PACKET */ rcu_read_lock(); err = ieee80211_lookup_ra_sta(sdata, skb, &sta); if (err) { dev_kfree_skb(skb); rcu_read_unlock(); return err; } if (!IS_ERR(sta)) { u16 queue = ieee80211_select_queue(sdata, sta, skb); skb_set_queue_mapping(skb, queue); /* * for MLO STA, the SA should be the AP MLD address, but * the link ID has
been selected already */ if (sta && sta->sta.mlo) memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN); } rcu_read_unlock(); start_xmit: local_bh_disable(); __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie); local_bh_enable(); return 0; } int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev, const u8 *buf, size_t len) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; skb = dev_alloc_skb(local->hw.extra_tx_headroom + len + 30 + /* header size */ 18); /* 11s header size */ if (!skb) return -ENOMEM; skb_reserve(skb, local->hw.extra_tx_headroom); skb_put_data(skb, buf, len); skb->dev = dev; skb->protocol = htons(ETH_P_802_3); skb_reset_network_header(skb); skb_reset_mac_header(skb); local_bh_disable(); __ieee80211_subif_start_xmit(skb, skb->dev, 0, IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP, NULL); local_bh_enable(); return 0; }
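/*
 * Illustrative sketch, not part of mac80211: a minimal stand-alone
 * model of the airtime-weighted deficit round-robin that
 * ieee80211_next_txq() implements above. All names and numbers here
 * are made up for the example. The discipline is: a station with a
 * negative deficit has its deficit topped up by its airtime weight
 * and is rotated to the tail of the list; only a station with a
 * non-negative deficit may transmit, and every transmission is
 * charged its (estimated) airtime.
 */
#include <stdio.h>

struct toy_sta {
	const char *name;
	int weight;	/* cf. sta->airtime_weight */
	int deficit;	/* cf. sta->airtime[ac].deficit */
};

static void toy_rotate(struct toy_sta **ring, int n)
{
	struct toy_sta *head = ring[0];
	int i;

	for (i = 0; i < n - 1; i++)
		ring[i] = ring[i + 1];
	ring[n - 1] = head;
}

/* Pick the next station allowed to transmit: refill and rotate
 * negative-deficit stations until the head is eligible. Terminates
 * because the weights are positive. */
static struct toy_sta *toy_next_txq(struct toy_sta **ring, int n)
{
	while (ring[0]->deficit < 0) {
		ring[0]->deficit += ring[0]->weight;
		toy_rotate(ring, n);
	}
	return ring[0];
}

int main(void)
{
	struct toy_sta a = { "sta-a", 100, 0 };	/* one share of airtime */
	struct toy_sta b = { "sta-b", 300, 0 };	/* three shares of airtime */
	struct toy_sta *ring[] = { &a, &b };
	int tx;

	for (tx = 0; tx < 8; tx++) {
		struct toy_sta *sta = toy_next_txq(ring, 2);

		/* pretend each transmission costs 150 airtime units */
		sta->deficit -= 150;
		printf("tx %d: %s (deficit %d)\n", tx, sta->name, sta->deficit);
	}
	return 0;
}
/*
 * Over time sta-b gets roughly three transmissions for every one of
 * sta-a, matching the 100:300 weights, with no bookkeeping beyond
 * the per-station deficit.
 */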
// SPDX-License-Identifier: GPL-2.0-only /*************************************************************************** * Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org> * * * * Based on Logitech G13 driver (v0.4) * * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> * * * ***************************************************************************/ #include <linux/hid.h> #include <linux/vmalloc.h> #include <linux/fb.h> #include <linux/module.h> #include "hid-picolcd.h" /* Framebuffer * * The PicoLCD uses a Topway LCD module of 256x64 pixels * This display area is tiled over 4 controllers with 8 tiles * each. Each tile has 8x64 pixels, each data byte encoding * a 1-pixel-wide, 8-pixel-tall vertical slice of the tile. * * The display can be updated at a tile granularity. * * Chip 1 Chip 2 Chip 3 Chip 4 * +----------------+----------------+----------------+----------------+ * | Tile 1 | Tile 1 | Tile 1 | Tile 1 | * +----------------+----------------+----------------+----------------+ * | Tile 2 | Tile 2 | Tile 2 | Tile 2 | * +----------------+----------------+----------------+----------------+ * ...
* +----------------+----------------+----------------+----------------+ * | Tile 8 | Tile 8 | Tile 8 | Tile 8 | * +----------------+----------------+----------------+----------------+ */ #define PICOLCDFB_NAME "picolcdfb" #define PICOLCDFB_WIDTH (256) #define PICOLCDFB_HEIGHT (64) #define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8) #define PICOLCDFB_UPDATE_RATE_LIMIT 10 #define PICOLCDFB_UPDATE_RATE_DEFAULT 2 /* Framebuffer visual structures */ static const struct fb_fix_screeninfo picolcdfb_fix = { .id = PICOLCDFB_NAME, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_MONO01, .xpanstep = 0, .ypanstep = 0, .ywrapstep = 0, .line_length = PICOLCDFB_WIDTH / 8, .accel = FB_ACCEL_NONE, }; static const struct fb_var_screeninfo picolcdfb_var = { .xres = PICOLCDFB_WIDTH, .yres = PICOLCDFB_HEIGHT, .xres_virtual = PICOLCDFB_WIDTH, .yres_virtual = PICOLCDFB_HEIGHT, .width = 103, .height = 26, .bits_per_pixel = 1, .grayscale = 1, .red = { .offset = 0, .length = 1, .msb_right = 0, }, .green = { .offset = 0, .length = 1, .msb_right = 0, }, .blue = { .offset = 0, .length = 1, .msb_right = 0, }, .transp = { .offset = 0, .length = 0, .msb_right = 0, }, }; /* Send a given tile to PicoLCD */ static int picolcd_fb_send_tile(struct picolcd_data *data, u8 *vbitmap, int chip, int tile) { struct hid_report *report1, *report2; unsigned long flags; u8 *tdata; int i; report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, data->hdev); if (!report1 || report1->maxfield != 1) return -ENODEV; report2 = picolcd_out_report(REPORT_LCD_DATA, data->hdev); if (!report2 || report2->maxfield != 1) return -ENODEV; spin_lock_irqsave(&data->lock, flags); if ((data->status & PICOLCD_FAILED)) { spin_unlock_irqrestore(&data->lock, flags); return -ENODEV; } hid_set_field(report1->field[0], 0, chip << 2); hid_set_field(report1->field[0], 1, 0x02); hid_set_field(report1->field[0], 2, 0x00); hid_set_field(report1->field[0], 3, 0x00); hid_set_field(report1->field[0], 4, 0xb8 | tile); hid_set_field(report1->field[0], 5, 0x00); hid_set_field(report1->field[0], 6, 0x00); hid_set_field(report1->field[0], 7, 0x40); hid_set_field(report1->field[0], 8, 0x00); hid_set_field(report1->field[0], 9, 0x00); hid_set_field(report1->field[0], 10, 32); hid_set_field(report2->field[0], 0, (chip << 2) | 0x01); hid_set_field(report2->field[0], 1, 0x00); hid_set_field(report2->field[0], 2, 0x00); hid_set_field(report2->field[0], 3, 32); tdata = vbitmap + (tile * 4 + chip) * 64; for (i = 0; i < 64; i++) if (i < 32) hid_set_field(report1->field[0], 11 + i, tdata[i]); else hid_set_field(report2->field[0], 4 + i - 32, tdata[i]); hid_hw_request(data->hdev, report1, HID_REQ_SET_REPORT); hid_hw_request(data->hdev, report2, HID_REQ_SET_REPORT); spin_unlock_irqrestore(&data->lock, flags); return 0; } /* Translate a single tile*/ static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp, int chip, int tile) { int i, b, changed = 0; u8 tdata[64]; u8 *vdata = vbitmap + (tile * 4 + chip) * 64; if (bpp == 1) { for (b = 7; b >= 0; b--) { const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32; for (i = 0; i < 64; i++) { tdata[i] <<= 1; tdata[i] |= (bdata[i/8] >> (i % 8)) & 0x01; } } } else if (bpp == 8) { for (b = 7; b >= 0; b--) { const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8; for (i = 0; i < 64; i++) { tdata[i] <<= 1; tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00; } } } else { /* Oops, we should never get here! 
*/ WARN_ON(1); return 0; } for (i = 0; i < 64; i++) if (tdata[i] != vdata[i]) { changed = 1; vdata[i] = tdata[i]; } return changed; } void picolcd_fb_refresh(struct picolcd_data *data) { if (data->fb_info) schedule_delayed_work(&data->fb_info->deferred_work, 0); } /* Reconfigure LCD display */ int picolcd_fb_reset(struct picolcd_data *data, int clear) { struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev); struct picolcd_fb_data *fbdata = data->fb_info->par; int i, j; unsigned long flags; static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 }; if (!report || report->maxfield != 1) return -ENODEV; spin_lock_irqsave(&data->lock, flags); for (i = 0; i < 4; i++) { for (j = 0; j < report->field[0]->maxusage; j++) if (j == 0) hid_set_field(report->field[0], j, i << 2); else if (j < sizeof(mapcmd)) hid_set_field(report->field[0], j, mapcmd[j]); else hid_set_field(report->field[0], j, 0); hid_hw_request(data->hdev, report, HID_REQ_SET_REPORT); } spin_unlock_irqrestore(&data->lock, flags); if (clear) { memset(fbdata->vbitmap, 0, PICOLCDFB_SIZE); memset(fbdata->bitmap, 0, PICOLCDFB_SIZE*fbdata->bpp); } fbdata->force = 1; /* schedule first output of framebuffer */ if (fbdata->ready) schedule_delayed_work(&data->fb_info->deferred_work, 0); else fbdata->ready = 1; return 0; } /* Update fb_vbitmap from the screen_buffer and send changed tiles to device */ static void picolcd_fb_update(struct fb_info *info) { int chip, tile, n; unsigned long flags; struct picolcd_fb_data *fbdata = info->par; struct picolcd_data *data; mutex_lock(&info->lock); spin_lock_irqsave(&fbdata->lock, flags); if (!fbdata->ready && fbdata->picolcd) picolcd_fb_reset(fbdata->picolcd, 0); spin_unlock_irqrestore(&fbdata->lock, flags); /* * Translate the framebuffer into the format needed by the PicoLCD. * See display layout above. * Do this one tile after the other and push those tiles that changed. * * Wait for our IO to complete as otherwise we might flood the queue! */ n = 0; for (chip = 0; chip < 4; chip++) for (tile = 0; tile < 8; tile++) { if (!fbdata->force && !picolcd_fb_update_tile( fbdata->vbitmap, fbdata->bitmap, fbdata->bpp, chip, tile)) continue; n += 2; if (n >= HID_OUTPUT_FIFO_SIZE / 2) { spin_lock_irqsave(&fbdata->lock, flags); data = fbdata->picolcd; spin_unlock_irqrestore(&fbdata->lock, flags); mutex_unlock(&info->lock); if (!data) return; hid_hw_wait(data->hdev); mutex_lock(&info->lock); n = 0; } spin_lock_irqsave(&fbdata->lock, flags); data = fbdata->picolcd; spin_unlock_irqrestore(&fbdata->lock, flags); if (!data || picolcd_fb_send_tile(data, fbdata->vbitmap, chip, tile)) goto out; } fbdata->force = false; if (n) { spin_lock_irqsave(&fbdata->lock, flags); data = fbdata->picolcd; spin_unlock_irqrestore(&fbdata->lock, flags); mutex_unlock(&info->lock); if (data) hid_hw_wait(data->hdev); return; } out: mutex_unlock(&info->lock); } static int picolcd_fb_blank(int blank, struct fb_info *info) { /* We let fb notification do this for us via lcd/backlight device */ return 0; } static void picolcd_fb_destroy(struct fb_info *info) { struct picolcd_fb_data *fbdata = info->par; /* make sure no work is deferred */ fb_deferred_io_cleanup(info); /* No thirdparty should ever unregister our framebuffer! 
*/ WARN_ON(fbdata->picolcd != NULL); vfree((u8 *)info->fix.smem_start); framebuffer_release(info); } static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { __u32 bpp = var->bits_per_pixel; __u32 activate = var->activate; /* only allow 1/8 bit depth (8-bit is grayscale) */ *var = picolcdfb_var; var->activate = activate; if (bpp >= 8) { var->bits_per_pixel = 8; var->red.length = 8; var->green.length = 8; var->blue.length = 8; } else { var->bits_per_pixel = 1; var->red.length = 1; var->green.length = 1; var->blue.length = 1; } return 0; } static int picolcd_set_par(struct fb_info *info) { struct picolcd_fb_data *fbdata = info->par; u8 *tmp_fb, *o_fb; if (info->var.bits_per_pixel == fbdata->bpp) return 0; /* switch between 1/8 bit depths */ if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8) return -EINVAL; o_fb = fbdata->bitmap; tmp_fb = kmalloc_array(PICOLCDFB_SIZE, info->var.bits_per_pixel, GFP_KERNEL); if (!tmp_fb) return -ENOMEM; /* translate FB content to new bits-per-pixel */ if (info->var.bits_per_pixel == 1) { int i, b; for (i = 0; i < PICOLCDFB_SIZE; i++) { u8 p = 0; for (b = 0; b < 8; b++) { p <<= 1; p |= o_fb[i*8+b] ? 0x01 : 0x00; } tmp_fb[i] = p; } memcpy(o_fb, tmp_fb, PICOLCDFB_SIZE); info->fix.visual = FB_VISUAL_MONO01; info->fix.line_length = PICOLCDFB_WIDTH / 8; } else { int i; memcpy(tmp_fb, o_fb, PICOLCDFB_SIZE); for (i = 0; i < PICOLCDFB_SIZE * 8; i++) o_fb[i] = tmp_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00; info->fix.visual = FB_VISUAL_DIRECTCOLOR; info->fix.line_length = PICOLCDFB_WIDTH; } kfree(tmp_fb); fbdata->bpp = info->var.bits_per_pixel; return 0; } static void picolcdfb_ops_damage_range(struct fb_info *info, off_t off, size_t len) { if (!info->par) return; schedule_delayed_work(&info->deferred_work, 0); } static void picolcdfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height) { if (!info->par) return; schedule_delayed_work(&info->deferred_work, 0); } FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(picolcdfb_ops, picolcdfb_ops_damage_range, picolcdfb_ops_damage_area) static const struct fb_ops picolcdfb_ops = { .owner = THIS_MODULE, FB_DEFAULT_DEFERRED_OPS(picolcdfb_ops), .fb_destroy = picolcd_fb_destroy, .fb_blank = picolcd_fb_blank, .fb_check_var = picolcd_fb_check_var, .fb_set_par = picolcd_set_par, }; /* Callback from deferred IO workqueue */ static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagereflist) { picolcd_fb_update(info); } static const struct fb_deferred_io picolcd_fb_defio = { .delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT, .deferred_io = picolcd_fb_deferred_io, }; /* * The "fb_update_rate" sysfs attribute */ static ssize_t picolcd_fb_update_rate_show(struct device *dev, struct device_attribute *attr, char *buf) { struct picolcd_data *data = dev_get_drvdata(dev); struct picolcd_fb_data *fbdata = data->fb_info->par; unsigned i, fb_update_rate = fbdata->update_rate; size_t ret = 0; for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++) if (i == fb_update_rate) ret += sysfs_emit_at(buf, ret, "[%u] ", i); else ret += sysfs_emit_at(buf, ret, "%u ", i); if (ret > 0) buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n'; return ret; } static ssize_t picolcd_fb_update_rate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct picolcd_data *data = dev_get_drvdata(dev); struct picolcd_fb_data *fbdata = data->fb_info->par; int i; unsigned u; if (count < 1 || count > 10) return -EINVAL; i = sscanf(buf, "%u", &u); if (i != 1) return -EINVAL; if (u > 
PICOLCDFB_UPDATE_RATE_LIMIT) return -ERANGE; else if (u == 0) u = PICOLCDFB_UPDATE_RATE_DEFAULT; fbdata->update_rate = u; data->fb_info->fbdefio->delay = HZ / fbdata->update_rate; return count; } static DEVICE_ATTR(fb_update_rate, 0664, picolcd_fb_update_rate_show, picolcd_fb_update_rate_store); /* initialize Framebuffer device */ int picolcd_init_framebuffer(struct picolcd_data *data) { struct device *dev = &data->hdev->dev; struct fb_info *info = NULL; struct picolcd_fb_data *fbdata = NULL; int i, error = -ENOMEM; u32 *palette; /* The extra memory is: * - 256*u32 for pseudo_palette * - struct fb_deferred_io */ info = framebuffer_alloc(256 * sizeof(u32) + sizeof(struct fb_deferred_io) + sizeof(struct picolcd_fb_data) + PICOLCDFB_SIZE, dev); if (!info) goto err_nomem; info->fbdefio = info->par; *info->fbdefio = picolcd_fb_defio; info->par += sizeof(struct fb_deferred_io); palette = info->par; info->par += 256 * sizeof(u32); for (i = 0; i < 256; i++) palette[i] = i > 0 && i < 16 ? 0xff : 0; info->pseudo_palette = palette; info->fbops = &picolcdfb_ops; info->var = picolcdfb_var; info->fix = picolcdfb_fix; info->fix.smem_len = PICOLCDFB_SIZE*8; #ifdef CONFIG_FB_BACKLIGHT #ifdef CONFIG_HID_PICOLCD_BACKLIGHT info->bl_dev = data->backlight; #endif #endif #ifdef CONFIG_HID_PICOLCD_LCD info->lcd_dev = data->lcd; #endif fbdata = info->par; spin_lock_init(&fbdata->lock); fbdata->picolcd = data; fbdata->update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT; fbdata->bpp = picolcdfb_var.bits_per_pixel; fbdata->force = 1; fbdata->vbitmap = info->par + sizeof(struct picolcd_fb_data); fbdata->bitmap = vmalloc(PICOLCDFB_SIZE*8); if (fbdata->bitmap == NULL) { dev_err(dev, "can't get a free page for framebuffer\n"); goto err_nomem; } info->flags |= FBINFO_VIRTFB; info->screen_buffer = fbdata->bitmap; info->fix.smem_start = (unsigned long)fbdata->bitmap; memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE); data->fb_info = info; error = picolcd_fb_reset(data, 1); if (error) { dev_err(dev, "failed to configure display\n"); goto err_cleanup; } error = device_create_file(dev, &dev_attr_fb_update_rate); if (error) { dev_err(dev, "failed to create sysfs attributes\n"); goto err_cleanup; } fb_deferred_io_init(info); error = register_framebuffer(info); if (error) { dev_err(dev, "failed to register framebuffer\n"); goto err_sysfs; } return 0; err_sysfs: device_remove_file(dev, &dev_attr_fb_update_rate); fb_deferred_io_cleanup(info); err_cleanup: data->fb_info = NULL; err_nomem: if (fbdata) vfree(fbdata->bitmap); framebuffer_release(info); return error; } void picolcd_exit_framebuffer(struct picolcd_data *data) { struct fb_info *info = data->fb_info; struct picolcd_fb_data *fbdata; unsigned long flags; if (!info) return; device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate); fbdata = info->par; /* disconnect framebuffer from HID dev */ spin_lock_irqsave(&fbdata->lock, flags); fbdata->picolcd = NULL; spin_unlock_irqrestore(&fbdata->lock, flags); /* make sure there is no running update - thus that fbdata->picolcd * once obtained under lock is guaranteed not to get free() under * the feet of the deferred work */ flush_delayed_work(&info->deferred_work); data->fb_info = NULL; unregister_framebuffer(info); }
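/*
 * Editor's note: combining the tiling comment at the top of this file with
 * the (tile * 4 + chip) * 64 indexing in picolcd_fb_send_tile(), a pixel
 * (x, y) lands in the device-side shadow buffer as sketched below. The
 * helper name set_vbitmap_pixel() is illustrative and not part of the
 * driver.
 */
#include <stdint.h>
#include <string.h>

#define WIDTH  256
#define HEIGHT  64
#define VBITMAP_SIZE (WIDTH * HEIGHT / 8)	/* 2048 bytes, PICOLCDFB_SIZE */

static void set_vbitmap_pixel(uint8_t *vbitmap, unsigned int x,
			      unsigned int y, int on)
{
	unsigned int chip = x / 64;	/* one of 4 controllers, 64 px wide each */
	unsigned int tile = y / 8;	/* one of 8 tile rows, 8 px tall each */
	unsigned int col  = x % 64;	/* column within the tile */
	unsigned int bit  = y % 8;	/* row within the 8-pixel column byte */
	uint8_t *byte = vbitmap + (tile * 4 + chip) * 64 + col;

	if (on)
		*byte |= 1u << bit;
	else
		*byte &= ~(1u << bit);
}

int main(void)
{
	uint8_t vbitmap[VBITMAP_SIZE];

	memset(vbitmap, 0, sizeof(vbitmap));
	/* x=200, y=33 -> chip 3, tile 4, column 8, bit 1 */
	set_vbitmap_pixel(vbitmap, 200, 33, 1);
	return 0;
}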
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/ #include "allowedips.h" #include "peer.h" enum { MAX_ALLOWEDIPS_DEPTH = 129 }; static struct kmem_cache *node_cache; static void swap_endian(u8 *dst, const u8 *src, u8 bits) { if (bits == 32) { *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); } else if (bits == 128) { ((u64 *)dst)[0] = get_unaligned_be64(src); ((u64 *)dst)[1] = get_unaligned_be64(src + 8); } } static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, u8 cidr, u8 bits) { node->cidr = cidr; node->bit_at_a = cidr / 8U; #ifdef __LITTLE_ENDIAN node->bit_at_a ^= (bits / 8U - 1U) % 8U; #endif node->bit_at_b = 7U - (cidr % 8U); node->bitlen = bits; memcpy(node->bits, src, bits / 8U); } static inline u8 choose(struct allowedips_node *node, const u8 *key) { return (key[node->bit_at_a] >> node->bit_at_b) & 1; } static void push_rcu(struct allowedips_node **stack, struct allowedips_node __rcu *p, unsigned int *len) { if (rcu_access_pointer(p)) { if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH)) return; stack[(*len)++] = rcu_dereference_raw(p); } } static void node_free_rcu(struct rcu_head *rcu) { kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); } static void root_free_rcu(struct rcu_head *rcu) { struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { container_of(rcu, struct allowedips_node, rcu) }; unsigned int len = 1; while (len > 0 && (node = stack[--len])) { push_rcu(stack, node->bit[0], &len); push_rcu(stack, node->bit[1], &len); kmem_cache_free(node_cache, node); } } static void root_remove_peer_lists(struct allowedips_node *root) { struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root }; unsigned int len = 1; while (len > 0 && (node = stack[--len])) { push_rcu(stack, node->bit[0], &len); push_rcu(stack, node->bit[1], &len); if (rcu_access_pointer(node->peer)) list_del(&node->peer_list); } } static unsigned int fls128(u64 a, u64 b) { return a ? fls64(a) + 64U : fls64(b); } static u8 common_bits(const struct allowedips_node *node, const u8 *key, u8 bits) { if (bits == 32) return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key); else if (bits == 128) return 128U - fls128( *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0], *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]); return 0; } static bool prefix_matches(const struct allowedips_node *node, const u8 *key, u8 bits) { /* This could be much faster if it actually just compared the common * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and * the rest, but it turns out that common_bits is already super fast on * modern processors, even taking into account the unfortunate bswap. * So, we just inline it like this instead. 
*/ return common_bits(node, key, bits) >= node->cidr; } static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, const u8 *key) { struct allowedips_node *node = trie, *found = NULL; while (node && prefix_matches(node, key, bits)) { if (rcu_access_pointer(node->peer)) found = node; if (node->cidr == bits) break; node = rcu_dereference_bh(node->bit[choose(node, key)]); } return found; } /* Returns a strong reference to a peer */ static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits, const void *be_ip) { /* Aligned so it can be passed to fls/fls64 */ u8 ip[16] __aligned(__alignof(u64)); struct allowedips_node *node; struct wg_peer *peer = NULL; swap_endian(ip, be_ip, bits); rcu_read_lock_bh(); retry: node = find_node(rcu_dereference_bh(root), bits, ip); if (node) { peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer)); if (!peer) goto retry; } rcu_read_unlock_bh(); return peer; } static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, u8 cidr, u8 bits, struct allowedips_node **rnode, struct mutex *lock) { struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); struct allowedips_node *parent = NULL; bool exact = false; while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) { parent = node; if (parent->cidr == cidr) { exact = true; break; } node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); } *rnode = parent; return exact; } static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node) { node->parent_bit_packed = (unsigned long)parent | bit; rcu_assign_pointer(*parent, node); } static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) { u8 bit = choose(parent, node->bits); connect_node(&parent->bit[bit], bit, node); } static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, u8 cidr, struct wg_peer *peer, struct mutex *lock) { struct allowedips_node *node, *parent, *down, *newnode; if (unlikely(cidr > bits || !peer)) return -EINVAL; if (!rcu_access_pointer(*trie)) { node = kmem_cache_zalloc(node_cache, GFP_KERNEL); if (unlikely(!node)) return -ENOMEM; RCU_INIT_POINTER(node->peer, peer); list_add_tail(&node->peer_list, &peer->allowedips_list); copy_and_assign_cidr(node, key, cidr, bits); connect_node(trie, 2, node); return 0; } if (node_placement(*trie, key, cidr, bits, &node, lock)) { rcu_assign_pointer(node->peer, peer); list_move_tail(&node->peer_list, &peer->allowedips_list); return 0; } newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); if (unlikely(!newnode)) return -ENOMEM; RCU_INIT_POINTER(newnode->peer, peer); list_add_tail(&newnode->peer_list, &peer->allowedips_list); copy_and_assign_cidr(newnode, key, cidr, bits); if (!node) { down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); } else { const u8 bit = choose(node, key); down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); if (!down) { connect_node(&node->bit[bit], bit, newnode); return 0; } } cidr = min(cidr, common_bits(down, key, bits)); parent = node; if (newnode->cidr == cidr) { choose_and_connect_node(newnode, down); if (!parent) connect_node(trie, 2, newnode); else choose_and_connect_node(parent, newnode); return 0; } node = kmem_cache_zalloc(node_cache, GFP_KERNEL); if (unlikely(!node)) { list_del(&newnode->peer_list); kmem_cache_free(node_cache, newnode); return -ENOMEM; } INIT_LIST_HEAD(&node->peer_list); 
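	/*
	 * Editor's note: at this point the new prefix neither matched an
	 * existing node exactly nor fit into an empty child slot. @node
	 * becomes a peer-less glue node holding only the common prefix
	 * (@cidr was clamped to common_bits() above); the displaced
	 * subtree @down and @newnode both hang beneath it.
	 */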
copy_and_assign_cidr(node, newnode->bits, cidr, bits); choose_and_connect_node(node, down); choose_and_connect_node(node, newnode); if (!parent) connect_node(trie, 2, node); else choose_and_connect_node(parent, node); return 0; } static void remove_node(struct allowedips_node *node, struct mutex *lock) { struct allowedips_node *child, **parent_bit, *parent; bool free_parent; list_del_init(&node->peer_list); RCU_INIT_POINTER(node->peer, NULL); if (node->bit[0] && node->bit[1]) return; child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])], lockdep_is_held(lock)); if (child) child->parent_bit_packed = node->parent_bit_packed; parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL); *parent_bit = child; parent = (void *)parent_bit - offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]); free_parent = !rcu_access_pointer(node->bit[0]) && !rcu_access_pointer(node->bit[1]) && (node->parent_bit_packed & 3) <= 1 && !rcu_access_pointer(parent->peer); if (free_parent) child = rcu_dereference_protected(parent->bit[!(node->parent_bit_packed & 1)], lockdep_is_held(lock)); call_rcu(&node->rcu, node_free_rcu); if (!free_parent) return; if (child) child->parent_bit_packed = parent->parent_bit_packed; *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child; call_rcu(&parent->rcu, node_free_rcu); } static int remove(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, u8 cidr, struct wg_peer *peer, struct mutex *lock) { struct allowedips_node *node; if (unlikely(cidr > bits)) return -EINVAL; if (!rcu_access_pointer(*trie) || !node_placement(*trie, key, cidr, bits, &node, lock) || peer != rcu_access_pointer(node->peer)) return 0; remove_node(node, lock); return 0; } void wg_allowedips_init(struct allowedips *table) { table->root4 = table->root6 = NULL; table->seq = 1; } void wg_allowedips_free(struct allowedips *table, struct mutex *lock) { struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6; ++table->seq; RCU_INIT_POINTER(table->root4, NULL); RCU_INIT_POINTER(table->root6, NULL); if (rcu_access_pointer(old4)) { struct allowedips_node *node = rcu_dereference_protected(old4, lockdep_is_held(lock)); root_remove_peer_lists(node); call_rcu(&node->rcu, root_free_rcu); } if (rcu_access_pointer(old6)) { struct allowedips_node *node = rcu_dereference_protected(old6, lockdep_is_held(lock)); root_remove_peer_lists(node); call_rcu(&node->rcu, root_free_rcu); } } int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, u8 cidr, struct wg_peer *peer, struct mutex *lock) { /* Aligned so it can be passed to fls */ u8 key[4] __aligned(__alignof(u32)); ++table->seq; swap_endian(key, (const u8 *)ip, 32); return add(&table->root4, 32, key, cidr, peer, lock); } int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, u8 cidr, struct wg_peer *peer, struct mutex *lock) { /* Aligned so it can be passed to fls64 */ u8 key[16] __aligned(__alignof(u64)); ++table->seq; swap_endian(key, (const u8 *)ip, 128); return add(&table->root6, 128, key, cidr, peer, lock); } int wg_allowedips_remove_v4(struct allowedips *table, const struct in_addr *ip, u8 cidr, struct wg_peer *peer, struct mutex *lock) { /* Aligned so it can be passed to fls */ u8 key[4] __aligned(__alignof(u32)); ++table->seq; swap_endian(key, (const u8 *)ip, 32); return remove(&table->root4, 32, key, cidr, peer, lock); } int wg_allowedips_remove_v6(struct allowedips *table, const struct in6_addr *ip, u8 cidr, struct wg_peer *peer, 
struct mutex *lock) { /* Aligned so it can be passed to fls64 */ u8 key[16] __aligned(__alignof(u64)); ++table->seq; swap_endian(key, (const u8 *)ip, 128); return remove(&table->root6, 128, key, cidr, peer, lock); } void wg_allowedips_remove_by_peer(struct allowedips *table, struct wg_peer *peer, struct mutex *lock) { struct allowedips_node *node, *tmp; if (list_empty(&peer->allowedips_list)) return; ++table->seq; list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) remove_node(node, lock); } int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) { const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U); swap_endian(ip, node->bits, node->bitlen); memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes); if (node->cidr) ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U); *cidr = node->cidr; return node->bitlen == 32 ? AF_INET : AF_INET6; } /* Returns a strong reference to a peer */ struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP)) return lookup(table->root4, 32, &ip_hdr(skb)->daddr); else if (skb->protocol == htons(ETH_P_IPV6)) return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr); return NULL; } /* Returns a strong reference to a peer */ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP)) return lookup(table->root4, 32, &ip_hdr(skb)->saddr); else if (skb->protocol == htons(ETH_P_IPV6)) return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr); return NULL; } int __init wg_allowedips_slab_init(void) { node_cache = KMEM_CACHE(allowedips_node, 0); return node_cache ? 0 : -ENOMEM; } void wg_allowedips_slab_uninit(void) { rcu_barrier(); kmem_cache_destroy(node_cache); } #include "selftest/allowedips.c"
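/*
 * Editor's note: the trie walk above rests on common_bits() and
 * prefix_matches(): a node matches a key iff the two agree on at least
 * node->cidr leading bits, once swap_endian() has put the address into
 * native order so fls()/fls64() can be used. Below is a self-contained
 * sketch of the IPv4 (32-bit) case, with __builtin_clz() standing in for
 * the kernel's 32 - fls(x ^ y); the names ending in "32" are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int common_bits32(uint32_t a, uint32_t b)
{
	uint32_t diff = a ^ b;

	/* __builtin_clz(0) is undefined, so handle identical keys first */
	return diff ? (unsigned int)__builtin_clz(diff) : 32;
}

static int prefix_matches32(uint32_t node_ip, unsigned int node_cidr,
			    uint32_t key)
{
	return common_bits32(node_ip, key) >= node_cidr;
}

int main(void)
{
	uint32_t node = 0x0A000000;	/* 10.0.0.0/8 in native order */
	uint32_t in   = 0x0A010203;	/* 10.1.2.3: shares 8+ leading bits */
	uint32_t out  = 0x0B000001;	/* 11.0.0.1: only 7 leading bits common */

	printf("%d %d\n", prefix_matches32(node, 8, in),
	       prefix_matches32(node, 8, out));	/* prints "1 0" */
	return 0;
}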
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H

#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/shrinker.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_SOCK_THROTTLED,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

struct memcg_vmstats_percpu;
struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	atomic_t generation;
};

/*
 * per-node information in memory controller.
*/ struct mem_cgroup_per_node { /* Keep the read-only fields at the start */ struct mem_cgroup *memcg; /* Back pointer, we cannot */ /* use container_of */ struct lruvec_stats_percpu __percpu *lruvec_stats_percpu; struct lruvec_stats *lruvec_stats; struct shrinker_info __rcu *shrinker_info; #ifdef CONFIG_MEMCG_V1 /* * Memcg-v1 only stuff in middle as buffer between read mostly fields * and update often fields to avoid false sharing. If v1 stuff is * not present, an explicit padding is needed. */ struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ /* the soft limit is exceeded*/ bool on_tree; #else CACHELINE_PADDING(_pad1_); #endif /* Fields which get updated often at the end. */ struct lruvec lruvec; CACHELINE_PADDING(_pad2_); unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; struct mem_cgroup_reclaim_iter iter; #ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC /* slab stats for nmi context */ atomic_t slab_reclaimable; atomic_t slab_unreclaimable; #endif }; struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; unsigned long threshold; }; /* For threshold */ struct mem_cgroup_threshold_ary { /* An array index points to threshold just below or equal to usage. */ int current_threshold; /* Size of entries[] */ unsigned int size; /* Array of thresholds */ struct mem_cgroup_threshold entries[] __counted_by(size); }; struct mem_cgroup_thresholds { /* Primary thresholds array */ struct mem_cgroup_threshold_ary *primary; /* * Spare threshold array. * This is needed to make mem_cgroup_unregister_event() "never fail". * It must be able to store at least primary->size - 1 entries. */ struct mem_cgroup_threshold_ary *spare; }; /* * Remember four most recent foreign writebacks with dirty pages in this * cgroup. Inode sharing is expected to be uncommon and, even if we miss * one in a given round, we're likely to catch it later if it keeps * foreign-dirtying, so a fairly low count should be enough. * * See mem_cgroup_track_foreign_dirty_slowpath() for details. */ #define MEMCG_CGWB_FRN_CNT 4 struct memcg_cgwb_frn { u64 bdi_id; /* bdi->id of the foreign inode */ int memcg_id; /* memcg->css.id of foreign inode */ u64 at; /* jiffies_64 at the time of dirtying */ struct wb_completion done; /* tracks in-flight foreign writebacks */ }; /* * Bucket for arbitrarily byte-sized objects charged to a memory * cgroup. The bucket can be reparented in one piece when the cgroup * is destroyed, without having to round up the individual references * of all live memory objects in the wild. */ struct obj_cgroup { struct percpu_ref refcnt; struct mem_cgroup *memcg; atomic_t nr_charged_bytes; union { struct list_head list; /* protected by objcg_lock */ struct rcu_head rcu; }; }; /* * The memory controller data structure. The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide * statistics based on the statistics developed by Rik Van Riel for clock-pro, * to help the administrator determine what knobs to tune. */ struct mem_cgroup { struct cgroup_subsys_state css; /* Private memcg ID. 
Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* registered local peak watchers */
	struct list_head memory_peaks;
	struct list_head swap_peaks;
	spinlock_t peaks_lock;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#ifdef CONFIG_ZSWAP
	unsigned long zswap_max;

	/*
	 * Prevent pages from this memcg from being written back from zswap to
	 * swap, and from being swapped out on zswap store failures.
	 */
	bool zswap_writeback;
#endif

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup if it
	 * has to kill one?
	 */
	bool oom_group;

	int swappiness;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* memory.stat */
	struct memcg_vmstats *vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
	/* MEMCG_KMEM for nmi context */
	atomic_t kmem_stat;
#endif
	/*
	 * Hint of reclaim pressure for socket memory management. Note
	 * that this indicator should NOT be used in legacy cgroup mode
	 * where socket memory is accounted/charged separately.
	 */
	u64 socket_pressure;
#if BITS_PER_LONG < 64
	seqlock_t socket_pressure_seqlock;
#endif
	int kmemcg_id;
	/*
	 * memcg->objcg is wiped out as a part of the objcg reparenting
	 * process. memcg->orig_objcg preserves a pointer (and a reference)
	 * to the original objcg until the end of the memcg's life.
	 */
	struct obj_cgroup __rcu *objcg;
	struct obj_cgroup *orig_objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN_WALKS_MMU
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

#ifdef CONFIG_MEMCG_V1
	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	struct memcg1_events_percpu __percpu *events_percpu;

	unsigned long soft_limit;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	/* OOM-Killer disable */
	int oom_kill_disable;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;
#endif /* CONFIG_MEMCG_V1 */

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial.
 * TODO: it may be necessary to use larger numbers on big iron, or to make
 * this dynamic based on the workload.
*/ #define MEMCG_CHARGE_BATCH 64U extern struct mem_cgroup *root_mem_cgroup; enum page_memcg_data_flags { /* page->memcg_data is a pointer to an slabobj_ext vector */ MEMCG_DATA_OBJEXTS = (1UL << 0), /* page has been accounted as a non-slab kernel page */ MEMCG_DATA_KMEM = (1UL << 1), /* the next bit after the last actual flag */ __NR_MEMCG_DATA_FLAGS = (1UL << 2), }; #define __OBJEXTS_ALLOC_FAIL MEMCG_DATA_OBJEXTS #define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS #else /* CONFIG_MEMCG */ #define __OBJEXTS_ALLOC_FAIL (1UL << 0) #define __FIRST_OBJEXT_FLAG (1UL << 0) #endif /* CONFIG_MEMCG */ enum objext_flags { /* * Use bit 0 with zero other bits to signal that slabobj_ext vector * failed to allocate. The same bit 0 with valid upper bits means * MEMCG_DATA_OBJEXTS. */ OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL, /* slabobj_ext vector allocated with kmalloc_nolock() */ OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG, /* the next bit after the last actual flag */ __NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1), }; #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1) #ifdef CONFIG_MEMCG static inline bool folio_memcg_kmem(struct folio *folio); /* * After the initialization objcg->memcg is always pointing at * a valid memcg, but can be atomically swapped to the parent memcg. * * The caller must ensure that the returned memcg won't be released. */ static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) { lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex)); return READ_ONCE(objcg->memcg); } /* * __folio_memcg - Get the memory cgroup associated with a non-kmem folio * @folio: Pointer to the folio. * * Returns a pointer to the memory cgroup associated with the folio, * or NULL. This function assumes that the folio is known to have a * proper memory cgroup pointer. It's not safe to call this function * against some type of folios, e.g. slab folios or ex-slab folios or * kmem folios. */ static inline struct mem_cgroup *__folio_memcg(struct folio *folio) { unsigned long memcg_data = folio->memcg_data; VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio); return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } /* * __folio_objcg - get the object cgroup associated with a kmem folio. * @folio: Pointer to the folio. * * Returns a pointer to the object cgroup associated with the folio, * or NULL. This function assumes that the folio is known to have a * proper object cgroup pointer. It's not safe to call this function * against some type of folios, e.g. slab folios or ex-slab folios or * LRU folios. */ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) { unsigned long memcg_data = folio->memcg_data; VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio); return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } /* * folio_memcg - Get the memory cgroup associated with a folio. * @folio: Pointer to the folio. * * Returns a pointer to the memory cgroup associated with the folio, * or NULL. This function assumes that the folio is known to have a * proper memory cgroup pointer. It's not safe to call this function * against some type of folios, e.g. slab folios or ex-slab folios. 
* * For a non-kmem folio any of the following ensures folio and memcg binding * stability: * * - the folio lock * - LRU isolation * - exclusive reference * * For a kmem folio a caller should hold an rcu read lock to protect memcg * associated with a kmem folio from being released. */ static inline struct mem_cgroup *folio_memcg(struct folio *folio) { if (folio_memcg_kmem(folio)) return obj_cgroup_memcg(__folio_objcg(folio)); return __folio_memcg(folio); } /* * folio_memcg_charged - If a folio is charged to a memory cgroup. * @folio: Pointer to the folio. * * Returns true if folio is charged to a memory cgroup, otherwise returns false. */ static inline bool folio_memcg_charged(struct folio *folio) { return folio->memcg_data != 0; } /* * folio_memcg_check - Get the memory cgroup associated with a folio. * @folio: Pointer to the folio. * * Returns a pointer to the memory cgroup associated with the folio, * or NULL. This function unlike folio_memcg() can take any folio * as an argument. It has to be used in cases when it's not known if a folio * has an associated memory cgroup pointer or an object cgroups vector or * an object cgroup. * * For a non-kmem folio any of the following ensures folio and memcg binding * stability: * * - the folio lock * - LRU isolation * - exclusive reference * * For a kmem folio a caller should hold an rcu read lock to protect memcg * associated with a kmem folio from being released. */ static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) { /* * Because folio->memcg_data might be changed asynchronously * for slabs, READ_ONCE() should be used here. */ unsigned long memcg_data = READ_ONCE(folio->memcg_data); if (memcg_data & MEMCG_DATA_OBJEXTS) return NULL; if (memcg_data & MEMCG_DATA_KMEM) { struct obj_cgroup *objcg; objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); return obj_cgroup_memcg(objcg); } return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } static inline struct mem_cgroup *page_memcg_check(struct page *page) { if (PageTail(page)) return NULL; return folio_memcg_check((struct folio *)page); } static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) { struct mem_cgroup *memcg; rcu_read_lock(); retry: memcg = obj_cgroup_memcg(objcg); if (unlikely(!css_tryget(&memcg->css))) goto retry; rcu_read_unlock(); return memcg; } /* * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set. * @folio: Pointer to the folio. * * Checks if the folio has MemcgKmem flag set. The caller must ensure * that the folio has an associated memory cgroup. It's not safe to call * this function against some types of folios, e.g. slab folios. */ static inline bool folio_memcg_kmem(struct folio *folio) { VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page); VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio); return folio->memcg_data & MEMCG_DATA_KMEM; } static inline bool PageMemcgKmem(struct page *page) { return folio_memcg_kmem(page_folio(page)); } static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return (memcg == root_mem_cgroup); } static inline bool mem_cgroup_disabled(void) { return !cgroup_subsys_enabled(memory_cgrp_subsys); } static inline void mem_cgroup_protection(struct mem_cgroup *root, struct mem_cgroup *memcg, unsigned long *min, unsigned long *low) { *min = *low = 0; if (mem_cgroup_disabled()) return; /* * There is no reclaim protection applied to a targeted reclaim. 
* We are special casing this specific case here because * mem_cgroup_calculate_protection is not robust enough to keep * the protection invariant for calculated effective values for * parallel reclaimers with different reclaim target. This is * especially a problem for tail memcgs (as they have pages on LRU) * which would want to have effective values 0 for targeted reclaim * but a different value for external reclaim. * * Example * Let's have global and A's reclaim in parallel: * | * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G) * |\ * | C (low = 1G, usage = 2.5G) * B (low = 1G, usage = 0.5G) * * For the global reclaim * A.elow = A.low * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow * C.elow = min(C.usage, C.low) * * With the effective values resetting we have A reclaim * A.elow = 0 * B.elow = B.low * C.elow = C.low * * If the global reclaim races with A's reclaim then * B.elow = C.elow = 0 because children_low_usage > A.elow) * is possible and reclaiming B would be violating the protection. * */ if (root == memcg) return; *min = READ_ONCE(memcg->memory.emin); *low = READ_ONCE(memcg->memory.elow); } void mem_cgroup_calculate_protection(struct mem_cgroup *root, struct mem_cgroup *memcg); static inline bool mem_cgroup_unprotected(struct mem_cgroup *target, struct mem_cgroup *memcg) { /* * The root memcg doesn't account charges, and doesn't support * protection. The target memcg's protection is ignored, see * mem_cgroup_calculate_protection() and mem_cgroup_protection() */ return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) || memcg == target; } static inline bool mem_cgroup_below_low(struct mem_cgroup *target, struct mem_cgroup *memcg) { if (mem_cgroup_unprotected(target, memcg)) return false; return READ_ONCE(memcg->memory.elow) >= page_counter_read(&memcg->memory); } static inline bool mem_cgroup_below_min(struct mem_cgroup *target, struct mem_cgroup *memcg) { if (mem_cgroup_unprotected(target, memcg)) return false; return READ_ONCE(memcg->memory.emin) >= page_counter_read(&memcg->memory); } int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp); /** * mem_cgroup_charge - Charge a newly allocated folio to a cgroup. * @folio: Folio to charge. * @mm: mm context of the allocating task. * @gfp: Reclaim mode. * * Try to charge @folio to the memcg that @mm belongs to, reclaiming * pages according to @gfp if necessary. If @mm is NULL, try to * charge to the active memcg. * * Do not use this for folios allocated for swapin. * * Return: 0 on success. Otherwise, an error code is returned. */ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) { if (mem_cgroup_disabled()) return 0; return __mem_cgroup_charge(folio, mm, gfp); } int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp); int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); void __mem_cgroup_uncharge(struct folio *folio); /** * mem_cgroup_uncharge - Uncharge a folio. * @folio: Folio to uncharge. * * Uncharge a folio previously charged with mem_cgroup_charge(). 
*/ static inline void mem_cgroup_uncharge(struct folio *folio) { if (mem_cgroup_disabled()) return; __mem_cgroup_uncharge(folio); } void __mem_cgroup_uncharge_folios(struct folio_batch *folios); static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios) { if (mem_cgroup_disabled()) return; __mem_cgroup_uncharge_folios(folios); } void mem_cgroup_replace_folio(struct folio *old, struct folio *new); void mem_cgroup_migrate(struct folio *old, struct folio *new); /** * mem_cgroup_lruvec - get the lru list vector for a memcg & node * @memcg: memcg of the wanted lruvec * @pgdat: pglist_data * * Returns the lru list vector holding pages for a given @memcg & * @pgdat combination. This can be the node lruvec, if the memory * controller is disabled. */ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, struct pglist_data *pgdat) { struct mem_cgroup_per_node *mz; struct lruvec *lruvec; if (mem_cgroup_disabled()) { lruvec = &pgdat->__lruvec; goto out; } if (!memcg) memcg = root_mem_cgroup; mz = memcg->nodeinfo[pgdat->node_id]; lruvec = &mz->lruvec; out: /* * Since a node can be onlined after the mem_cgroup was created, * we have to be prepared to initialize lruvec->pgdat here; * and if offlined then reonlined, we need to reinitialize it. */ if (unlikely(lruvec->pgdat != pgdat)) lruvec->pgdat = pgdat; return lruvec; } /** * folio_lruvec - return lruvec for isolating/putting an LRU folio * @folio: Pointer to the folio. * * This function relies on folio->mem_cgroup being stable. */ static inline struct lruvec *folio_lruvec(struct folio *folio) { struct mem_cgroup *memcg = folio_memcg(folio); VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio); return mem_cgroup_lruvec(memcg, folio_pgdat(folio)); } struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); struct mem_cgroup *get_mem_cgroup_from_current(void); struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio); struct lruvec *folio_lruvec_lock(struct folio *folio); struct lruvec *folio_lruvec_lock_irq(struct folio *folio); struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, unsigned long *flags); #ifdef CONFIG_DEBUG_VM void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio); #else static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) { } #endif static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ? 
container_of(css, struct mem_cgroup, css) : NULL; } static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg) { return percpu_ref_tryget(&objcg->refcnt); } static inline void obj_cgroup_get(struct obj_cgroup *objcg) { percpu_ref_get(&objcg->refcnt); } static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, unsigned long nr) { percpu_ref_get_many(&objcg->refcnt, nr); } static inline void obj_cgroup_put(struct obj_cgroup *objcg) { if (objcg) percpu_ref_put(&objcg->refcnt); } static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg) { return !memcg || css_tryget(&memcg->css); } static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg) { return !memcg || css_tryget_online(&memcg->css); } static inline void mem_cgroup_put(struct mem_cgroup *memcg) { if (memcg) css_put(&memcg->css); } #define mem_cgroup_from_counter(counter, member) \ container_of(counter, struct mem_cgroup, member) struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, struct mem_cgroup *, struct mem_cgroup_reclaim_cookie *); void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, int (*)(struct task_struct *, void *), void *arg); static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) { if (mem_cgroup_disabled()) return 0; return memcg->id.id; } struct mem_cgroup *mem_cgroup_from_id(unsigned short id); #ifdef CONFIG_SHRINKER_DEBUG static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg) { return memcg ? cgroup_ino(memcg->css.cgroup) : 0; } struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino); #endif static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) { return mem_cgroup_from_css(seq_css(m)); } static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) { struct mem_cgroup_per_node *mz; if (mem_cgroup_disabled()) return NULL; mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); return mz->memcg; } /** * parent_mem_cgroup - find the accounting parent of a memcg * @memcg: memcg whose parent to find * * Returns the parent memcg, or NULL if this is the root. 
*/ static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { return mem_cgroup_from_css(memcg->css.parent); } static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) { if (root == memcg) return true; return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); } static inline bool mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; bool match = false; rcu_read_lock(); task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (task_memcg) match = mem_cgroup_is_descendant(task_memcg, memcg); rcu_read_unlock(); return match; } struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio); ino_t page_cgroup_ino(struct page *page); static inline bool mem_cgroup_online(struct mem_cgroup *memcg) { if (mem_cgroup_disabled()) return true; return !!(memcg->css.flags & CSS_ONLINE); } void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int zid, int nr_pages); static inline unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) { struct mem_cgroup_per_node *mz; mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); return READ_ONCE(mz->lru_zone_size[zone_idx][lru]); } void __mem_cgroup_handle_over_high(gfp_t gfp_mask); static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) { if (unlikely(current->memcg_nr_pages_over_high)) __mem_cgroup_handle_over_high(gfp_mask); } unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); unsigned long mem_cgroup_size(struct mem_cgroup *memcg); void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p); void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg); struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, struct mem_cgroup *oom_domain); void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); /* idx can be of type enum memcg_stat_item or node_stat_item */ void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, int val); static inline void mod_memcg_page_state(struct page *page, enum memcg_stat_item idx, int val) { struct mem_cgroup *memcg; if (mem_cgroup_disabled()) return; rcu_read_lock(); memcg = folio_memcg(page_folio(page)); if (memcg) mod_memcg_state(memcg, idx, val); rcu_read_unlock(); } unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx); unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx); unsigned long lruvec_page_state_local(struct lruvec *lruvec, enum node_stat_item idx); void mem_cgroup_flush_stats(struct mem_cgroup *memcg); void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg); void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count); static inline void count_memcg_folio_events(struct folio *folio, enum vm_event_item idx, unsigned long nr) { struct mem_cgroup *memcg = folio_memcg(folio); if (memcg) count_memcg_events(memcg, idx, nr); } static inline void count_memcg_events_mm(struct mm_struct *mm, enum vm_event_item idx, unsigned long count) { struct mem_cgroup *memcg; if (mem_cgroup_disabled()) return; rcu_read_lock(); memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (likely(memcg)) count_memcg_events(memcg, idx, count); rcu_read_unlock(); } static inline void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { count_memcg_events_mm(mm, idx, 1); } void __memcg_memory_event(struct 
mem_cgroup *memcg, enum memcg_memory_event event, bool allow_spinning); static inline void memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event) { __memcg_memory_event(memcg, event, true); } static inline void memcg_memory_event_mm(struct mm_struct *mm, enum memcg_memory_event event) { struct mem_cgroup *memcg; if (mem_cgroup_disabled()) return; rcu_read_lock(); memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (likely(memcg)) memcg_memory_event(memcg, event); rcu_read_unlock(); } void split_page_memcg(struct page *first, unsigned order); void folio_split_memcg_refs(struct folio *folio, unsigned old_order, unsigned new_order); static inline u64 cgroup_id_from_mm(struct mm_struct *mm) { struct mem_cgroup *memcg; u64 id; if (mem_cgroup_disabled()) return 0; rcu_read_lock(); memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (!memcg) memcg = root_mem_cgroup; id = cgroup_id(memcg->css.cgroup); rcu_read_unlock(); return id; } extern int mem_cgroup_init(void); #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 #define root_mem_cgroup (NULL) static inline struct mem_cgroup *folio_memcg(struct folio *folio) { return NULL; } static inline bool folio_memcg_charged(struct folio *folio) { return false; } static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) { return NULL; } static inline struct mem_cgroup *page_memcg_check(struct page *page) { return NULL; } static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) { return NULL; } static inline bool folio_memcg_kmem(struct folio *folio) { return false; } static inline bool PageMemcgKmem(struct page *page) { return false; } static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return true; } static inline bool mem_cgroup_disabled(void) { return true; } static inline void memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event) { } static inline void memcg_memory_event_mm(struct mm_struct *mm, enum memcg_memory_event event) { } static inline void mem_cgroup_protection(struct mem_cgroup *root, struct mem_cgroup *memcg, unsigned long *min, unsigned long *low) { *min = *low = 0; } static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, struct mem_cgroup *memcg) { } static inline bool mem_cgroup_unprotected(struct mem_cgroup *target, struct mem_cgroup *memcg) { return true; } static inline bool mem_cgroup_below_low(struct mem_cgroup *target, struct mem_cgroup *memcg) { return false; } static inline bool mem_cgroup_below_min(struct mem_cgroup *target, struct mem_cgroup *memcg) { return false; } static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) { return 0; } static inline int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp) { return 0; } static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) { return 0; } static inline void mem_cgroup_uncharge(struct folio *folio) { } static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios) { } static inline void mem_cgroup_replace_folio(struct folio *old, struct folio *new) { } static inline void mem_cgroup_migrate(struct folio *old, struct folio *new) { } static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, struct pglist_data *pgdat) { return &pgdat->__lruvec; } static inline struct lruvec *folio_lruvec(struct folio *folio) { struct pglist_data *pgdat = folio_pgdat(folio); return &pgdat->__lruvec; } static inline void 
lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) { } static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { return NULL; } static inline bool mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *memcg) { return true; } static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) { return NULL; } static inline struct mem_cgroup *get_mem_cgroup_from_current(void) { return NULL; } static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio) { return NULL; } static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) { return NULL; } static inline void obj_cgroup_get(struct obj_cgroup *objcg) { } static inline void obj_cgroup_put(struct obj_cgroup *objcg) { } static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg) { return true; } static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg) { return true; } static inline void mem_cgroup_put(struct mem_cgroup *memcg) { } static inline struct lruvec *folio_lruvec_lock(struct folio *folio) { struct pglist_data *pgdat = folio_pgdat(folio); spin_lock(&pgdat->__lruvec.lru_lock); return &pgdat->__lruvec; } static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio) { struct pglist_data *pgdat = folio_pgdat(folio); spin_lock_irq(&pgdat->__lruvec.lru_lock); return &pgdat->__lruvec; } static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, unsigned long *flagsp) { struct pglist_data *pgdat = folio_pgdat(folio); spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp); return &pgdat->__lruvec; } static inline struct mem_cgroup * mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, struct mem_cgroup_reclaim_cookie *reclaim) { return NULL; } static inline void mem_cgroup_iter_break(struct mem_cgroup *root, struct mem_cgroup *prev) { } static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, int (*fn)(struct task_struct *, void *), void *arg) { } static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) { return 0; } static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) { WARN_ON_ONCE(id); /* XXX: This should always return root_mem_cgroup */ return NULL; } #ifdef CONFIG_SHRINKER_DEBUG static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg) { return 0; } static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) { return NULL; } #endif static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) { return NULL; } static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) { return NULL; } static inline bool mem_cgroup_online(struct mem_cgroup *memcg) { return true; } static inline unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) { return 0; } static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) { return 0; } static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg) { return 0; } static inline void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) { } static inline void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { } static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) { } static inline struct mem_cgroup *mem_cgroup_get_oom_group( struct task_struct *victim, struct mem_cgroup *oom_domain) { return NULL; } static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) { } static inline void mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, int nr) { } static 
inline void mod_memcg_page_state(struct page *page, enum memcg_stat_item idx, int val) { } static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) { return 0; } static inline unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) { return node_page_state(lruvec_pgdat(lruvec), idx); } static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, enum node_stat_item idx) { return node_page_state(lruvec_pgdat(lruvec), idx); } static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg) { } static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) { } static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val) { struct page *page = virt_to_head_page(p); mod_node_page_state(page_pgdat(page), idx, val); } static inline void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, unsigned long count) { } static inline void count_memcg_folio_events(struct folio *folio, enum vm_event_item idx, unsigned long nr) { } static inline void count_memcg_events_mm(struct mm_struct *mm, enum vm_event_item idx, unsigned long count) { } static inline void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } static inline void split_page_memcg(struct page *first, unsigned order) { } static inline void folio_split_memcg_refs(struct folio *folio, unsigned old_order, unsigned new_order) { } static inline u64 cgroup_id_from_mm(struct mm_struct *mm) { return 0; } static inline int mem_cgroup_init(void) { return 0; } #endif /* CONFIG_MEMCG */ /* * Extended information for slab objects stored as an array in page->memcg_data * if MEMCG_DATA_OBJEXTS is set. */ struct slabobj_ext { #ifdef CONFIG_MEMCG struct obj_cgroup *objcg; #endif #ifdef CONFIG_MEM_ALLOC_PROFILING union codetag_ref ref; #endif } __aligned(8); static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) { struct mem_cgroup *memcg; memcg = lruvec_memcg(lruvec); if (!memcg) return NULL; memcg = parent_mem_cgroup(memcg); if (!memcg) return NULL; return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); } static inline void unlock_page_lruvec(struct lruvec *lruvec) { spin_unlock(&lruvec->lru_lock); } static inline void unlock_page_lruvec_irq(struct lruvec *lruvec) { spin_unlock_irq(&lruvec->lru_lock); } static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec, unsigned long flags) { spin_unlock_irqrestore(&lruvec->lru_lock, flags); } /* Test requires a stable folio->memcg binding, see folio_memcg() */ static inline bool folio_matches_lruvec(struct folio *folio, struct lruvec *lruvec) { return lruvec_pgdat(lruvec) == folio_pgdat(folio) && lruvec_memcg(lruvec) == folio_memcg(folio); } /* Don't lock again iff page's lruvec locked */ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio, struct lruvec *locked_lruvec) { if (locked_lruvec) { if (folio_matches_lruvec(folio, locked_lruvec)) return locked_lruvec; unlock_page_lruvec_irq(locked_lruvec); } return folio_lruvec_lock_irq(folio); } /* Don't lock again iff folio's lruvec locked */ static inline void folio_lruvec_relock_irqsave(struct folio *folio, struct lruvec **lruvecp, unsigned long *flags) { if (*lruvecp) { if (folio_matches_lruvec(folio, *lruvecp)) return; unlock_page_lruvec_irqrestore(*lruvecp, *flags); } *lruvecp = folio_lruvec_lock_irqsave(folio, flags); } #ifdef CONFIG_CGROUP_WRITEBACK struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long 
*pfilepages, unsigned long *pheadroom, unsigned long *pdirty, unsigned long *pwriteback); void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio, struct bdi_writeback *wb); static inline void mem_cgroup_track_foreign_dirty(struct folio *folio, struct bdi_writeback *wb) { struct mem_cgroup *memcg; if (mem_cgroup_disabled()) return; memcg = folio_memcg(folio); if (unlikely(memcg && &memcg->css != wb->memcg_css)) mem_cgroup_track_foreign_dirty_slowpath(folio, wb); } void mem_cgroup_flush_foreign(struct bdi_writeback *wb); #else /* CONFIG_CGROUP_WRITEBACK */ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) { return NULL; } static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, unsigned long *pheadroom, unsigned long *pdirty, unsigned long *pwriteback) { } static inline void mem_cgroup_track_foreign_dirty(struct folio *folio, struct bdi_writeback *wb) { } static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb) { } #endif /* CONFIG_CGROUP_WRITEBACK */ struct sock; #ifdef CONFIG_MEMCG extern struct static_key_false memcg_sockets_enabled_key; #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) void mem_cgroup_sk_alloc(struct sock *sk); void mem_cgroup_sk_free(struct sock *sk); void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk); bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages, gfp_t gfp_mask); void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages); #if BITS_PER_LONG < 64 static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg) { u64 val = get_jiffies_64() + HZ; unsigned long flags; write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags); memcg->socket_pressure = val; write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags); } static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg) { unsigned int seq; u64 val; do { seq = read_seqbegin(&memcg->socket_pressure_seqlock); val = memcg->socket_pressure; } while (read_seqretry(&memcg->socket_pressure_seqlock, seq)); return val; } #else static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg) { WRITE_ONCE(memcg->socket_pressure, jiffies + HZ); } static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg) { return READ_ONCE(memcg->socket_pressure); } #endif int alloc_shrinker_info(struct mem_cgroup *memcg); void free_shrinker_info(struct mem_cgroup *memcg); void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id); void reparent_shrinker_deferred(struct mem_cgroup *memcg); static inline int shrinker_id(struct shrinker *shrinker) { return shrinker->id; } #else #define mem_cgroup_sockets_enabled 0 static inline void mem_cgroup_sk_alloc(struct sock *sk) { } static inline void mem_cgroup_sk_free(struct sock *sk) { } static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk) { } static inline bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages, gfp_t gfp_mask) { return false; } static inline void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages) { } static inline void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) { } static inline int shrinker_id(struct shrinker *shrinker) { return -1; } #endif #ifdef CONFIG_MEMCG bool mem_cgroup_kmem_disabled(void); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); /* * The 
returned objcg pointer is safe to use without additional * protection within a scope. The scope is defined either by * the current task (similar to the "current" global variable) * or by set_active_memcg() pair. * Please, use obj_cgroup_get() to get a reference if the pointer * needs to be used outside of the local scope. */ struct obj_cgroup *current_obj_cgroup(void); struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio); static inline struct obj_cgroup *get_obj_cgroup_from_current(void) { struct obj_cgroup *objcg = current_obj_cgroup(); if (objcg) obj_cgroup_get(objcg); return objcg; } int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); extern struct static_key_false memcg_bpf_enabled_key; static inline bool memcg_bpf_enabled(void) { return static_branch_likely(&memcg_bpf_enabled_key); } extern struct static_key_false memcg_kmem_online_key; static inline bool memcg_kmem_online(void) { return static_branch_likely(&memcg_kmem_online_key); } static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) { if (memcg_kmem_online()) return __memcg_kmem_charge_page(page, gfp, order); return 0; } static inline void memcg_kmem_uncharge_page(struct page *page, int order) { if (memcg_kmem_online()) __memcg_kmem_uncharge_page(page, order); } /* * A helper for accessing memcg's kmem_id, used for getting * corresponding LRU lists. */ static inline int memcg_kmem_id(struct mem_cgroup *memcg) { return memcg ? memcg->kmemcg_id : -1; } struct mem_cgroup *mem_cgroup_from_slab_obj(void *p); static inline void count_objcg_events(struct obj_cgroup *objcg, enum vm_event_item idx, unsigned long count) { struct mem_cgroup *memcg; if (!memcg_kmem_online()) return; rcu_read_lock(); memcg = obj_cgroup_memcg(objcg); count_memcg_events(memcg, idx, count); rcu_read_unlock(); } bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid); void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg); static inline bool memcg_is_dying(struct mem_cgroup *memcg) { return memcg ? 
css_is_dying(&memcg->css) : false; } #else static inline bool mem_cgroup_kmem_disabled(void) { return true; } static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) { return 0; } static inline void memcg_kmem_uncharge_page(struct page *page, int order) { } static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order) { return 0; } static inline void __memcg_kmem_uncharge_page(struct page *page, int order) { } static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio) { return NULL; } static inline bool memcg_bpf_enabled(void) { return false; } static inline bool memcg_kmem_online(void) { return false; } static inline int memcg_kmem_id(struct mem_cgroup *memcg) { return -1; } static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) { return NULL; } static inline void count_objcg_events(struct obj_cgroup *objcg, enum vm_event_item idx, unsigned long count) { } static inline ino_t page_cgroup_ino(struct page *page) { return 0; } static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid) { return true; } static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg) { } static inline bool memcg_is_dying(struct mem_cgroup *memcg) { return false; } #endif /* CONFIG_MEMCG */ #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP) bool obj_cgroup_may_zswap(struct obj_cgroup *objcg); void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size); void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size); bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg); #else static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg) { return true; } static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size) { } static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size) { } static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) { /* if zswap is disabled, do not block pages going to the swapping device */ return true; } #endif /* Cgroup v1-related declarations */ #ifdef CONFIG_MEMCG_V1 unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); bool mem_cgroup_oom_synchronize(bool wait); static inline bool task_in_memcg_oom(struct task_struct *p) { return p->memcg_in_oom; } static inline void mem_cgroup_enter_user_fault(void) { WARN_ON(current->in_user_fault); current->in_user_fault = 1; } static inline void mem_cgroup_exit_user_fault(void) { WARN_ON(!current->in_user_fault); current->in_user_fault = 0; } void memcg1_swapout(struct folio *folio, swp_entry_t entry); void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages); #else /* CONFIG_MEMCG_V1 */ static inline unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned) { return 0; } static inline bool task_in_memcg_oom(struct task_struct *p) { return false; } static inline bool mem_cgroup_oom_synchronize(bool wait) { return false; } static inline void mem_cgroup_enter_user_fault(void) { } static inline void mem_cgroup_exit_user_fault(void) { } static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry) { } static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages) { } #endif /* CONFIG_MEMCG_V1 */ #endif /* _LINUX_MEMCONTROL_H */
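/* An illustrative use of the relock helpers declared above (not part of
 * the header): batched LRU walks hold one lruvec lock across consecutive
 * folios and only drop/retake it when a folio belongs to a different
 * lruvec. process_folio() and the folios[] array are hypothetical
 * stand-ins for the caller's own work.
 */
static void example_batch_walk(struct folio **folios, unsigned int nr)
{
	struct lruvec *lruvec = NULL;
	unsigned long flags;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		/* keeps the lock when folios[i] maps to the same lruvec */
		folio_lruvec_relock_irqsave(folios[i], &lruvec, &flags);
		process_folio(lruvec, folios[i]);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
}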
/* Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ieee802154.h>
#include <linux/if_arp.h>

#include <net/ipv6.h>
#include <net/netdev_lock.h>

#include "6lowpan_i.h"

static int open_count;

static const struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};

static int lowpan_dev_init(struct net_device *ldev)
{
	netdev_lockdep_set_classes(ldev);

	return 0;
}

static int lowpan_open(struct net_device *dev)
{
	if (!open_count)
		lowpan_rx_init();
	open_count++;
	return 0;
}

static int lowpan_stop(struct net_device *dev)
{
	open_count--;
	if (!open_count)
		lowpan_rx_exit();
	return 0;
}

static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
{
	struct lowpan_802154_neigh *neigh = lowpan_802154_neigh(neighbour_priv(n));

	/* default no short_addr is available for a neighbour */
	neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
	return 0;
}

static int lowpan_get_iflink(const struct net_device *dev)
{
	return READ_ONCE(lowpan_802154_dev(dev)->wdev->ifindex);
}

static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_init		= lowpan_dev_init,
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_open		= lowpan_open,
	.ndo_stop		= lowpan_stop,
	.ndo_neigh_construct	= lowpan_neigh_construct,
	.ndo_get_iflink		= lowpan_get_iflink,
};

static void lowpan_setup(struct net_device *ldev)
{
	memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	/* We need an ipv6hdr as minimum len when calling xmit */
	ldev->hard_header_len	= sizeof(struct ipv6hdr);
	ldev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	ldev->priv_flags	|= IFF_NO_QUEUE;

	ldev->netdev_ops	= &lowpan_netdev_ops;
	ldev->header_ops	= &lowpan_header_ops;
	ldev->needs_free_netdev	= true;
	ldev->netns_immutable	= true;
}

static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
			return -EINVAL;
	}
	return 0;
}

static int lowpan_newlink(struct net_device *ldev,
			  struct rtnl_newlink_params *params,
			  struct netlink_ext_ack *extack)
{
	struct nlattr **tb = params->tb;
	struct net_device *wdev;
	int ret;

	ASSERT_RTNL();

	pr_debug("adding new link\n");

	if (!tb[IFLA_LINK])
		return -EINVAL;
	if (params->link_net && !net_eq(params->link_net, dev_net(ldev)))
		return -EINVAL;
	/* find and hold wpan device */
	wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK]));
	if (!wdev)
		return -ENODEV;
	if (wdev->type != ARPHRD_IEEE802154) {
		dev_put(wdev);
		return -EINVAL;
	}

	if (wdev->ieee802154_ptr->lowpan_dev) {
		dev_put(wdev);
		return -EBUSY;
	}

	lowpan_802154_dev(ldev)->wdev = wdev;
	/* Set the lowpan hardware address to the wpan hardware address. */
	__dev_addr_set(ldev, wdev->dev_addr, IEEE802154_ADDR_LEN);
	/* We need headroom for possible wpan_dev_hard_header call and tailroom
	 * for encryption/fcs handling. The lowpan interface will replace
	 * the IPv6 header with 6LoWPAN header. At worst case the 6LoWPAN
	 * header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6
	 * header.
	 */
	ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN +
				wdev->needed_headroom;
	ldev->needed_tailroom = wdev->needed_tailroom;

	ldev->neigh_priv_len = sizeof(struct lowpan_802154_neigh);

	ret = lowpan_register_netdevice(ldev, LOWPAN_LLTYPE_IEEE802154);
	if (ret < 0) {
		dev_put(wdev);
		return ret;
	}

	wdev->ieee802154_ptr->lowpan_dev = ldev;
	return 0;
}

static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
{
	struct net_device *wdev = lowpan_802154_dev(ldev)->wdev;

	ASSERT_RTNL();

	wdev->ieee802154_ptr->lowpan_dev = NULL;
	lowpan_unregister_netdevice(ldev);
	dev_put(wdev);
}

static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= LOWPAN_PRIV_SIZE(sizeof(struct lowpan_802154_dev)),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};

static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}

static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}

static int lowpan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct wpan_dev *wpan_dev;

	if (ndev->type != ARPHRD_IEEE802154)
		return NOTIFY_DONE;
	wpan_dev = ndev->ieee802154_ptr;
	if (!wpan_dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* If a wpan interface is unregistered, also delete any
		 * lowpan interface that belongs to it.
		 */
		if (wpan_dev->lowpan_dev)
			lowpan_dellink(wpan_dev->lowpan_dev, NULL);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};

static int __init lowpan_init_module(void)
{
	int err = 0;

	err = lowpan_net_frag_init();
	if (err < 0)
		goto out;

	err = lowpan_netlink_init();
	if (err < 0)
		goto out_frag;

	err = register_netdevice_notifier(&lowpan_dev_notifier);
	if (err < 0)
		goto out_pack;

	return 0;

out_pack:
	lowpan_netlink_fini();
out_frag:
	lowpan_net_frag_exit();
out:
	return err;
}

static void __exit lowpan_cleanup_module(void)
{
	lowpan_netlink_fini();

	lowpan_net_frag_exit();

	unregister_netdevice_notifier(&lowpan_dev_notifier);
}

module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);
MODULE_DESCRIPTION("IPv6 over Low power Wireless Personal Area Network IEEE 802.15.4 core");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("lowpan");
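/* Typical userspace instantiation of such a link with iproute2 (shown for
 * context; the interface names are examples):
 *
 *   ip link add link wpan0 name lowpan0 type lowpan
 *
 * The IFLA_LINK attribute validated in lowpan_newlink() above carries the
 * "link wpan0" part of that command.
 */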
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2008-2009 Pablo Neira Ayuso <pablo@netfilter.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/ip.h>
#include <net/ipv6.h>

#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/xt_cluster.h>

static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct)
{
	return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
}

static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
{
	return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
}

static inline u_int32_t
xt_cluster_hash_ipv4(u_int32_t ip, const struct xt_cluster_match_info *info)
{
	return jhash_1word(ip, info->hash_seed);
}

static inline u_int32_t
xt_cluster_hash_ipv6(const void *ip, const struct xt_cluster_match_info *info)
{
	return jhash2(ip, NF_CT_TUPLE_L3SIZE / sizeof(__u32), info->hash_seed);
}

static inline u_int32_t
xt_cluster_hash(const struct nf_conn *ct,
		const struct xt_cluster_match_info *info)
{
	u_int32_t hash = 0;

	switch (nf_ct_l3num(ct)) {
	case AF_INET:
		hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info);
		break;
	case AF_INET6:
		hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info);
		break;
	default:
		WARN_ON(1);
		break;
	}

	return reciprocal_scale(hash, info->total_nodes);
}

static inline bool
xt_cluster_is_multicast_addr(const struct sk_buff *skb, u_int8_t family)
{
	bool is_multicast = false;

	switch (family) {
	case NFPROTO_IPV4:
		is_multicast = ipv4_is_multicast(ip_hdr(skb)->daddr);
		break;
	case NFPROTO_IPV6:
		is_multicast = ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr);
		break;
	default:
		WARN_ON(1);
		break;
	}
	return is_multicast;
}

static bool
xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct sk_buff *pskb = (struct sk_buff *)skb;
	const struct xt_cluster_match_info *info = par->matchinfo;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned long hash;

	/* This match assumes that all nodes see the same packets. This can be
	 * achieved if the switch that connects the cluster nodes supports some
	 * sort of 'port mirroring'. However, if your switch does not support
	 * this, your cluster nodes can reply to ARP requests using a multicast
	 * MAC address. Thus, your switch will flood the same packets to the
	 * cluster nodes with the same multicast MAC address. Using a multicast
	 * link address is a RFC 1812 (section 3.3.2) violation, but this works
	 * fine in practice.
	 *
	 * Unfortunately, if you use the multicast MAC address, the link layer
	 * sets skbuff's pkt_type to PACKET_MULTICAST, which is not accepted
	 * by TCP and others for packets coming to this node. For that reason,
	 * this match mangles skbuff's pkt_type if it detects a packet
	 * addressed to a unicast address but using PACKET_MULTICAST. Yes, I
	 * know, matches should not alter packets, but we are doing this here
	 * because we would need to add a PKTTYPE target for this sole purpose.
	 */
	if (!xt_cluster_is_multicast_addr(skb, xt_family(par)) &&
	    skb->pkt_type == PACKET_MULTICAST) {
		pskb->pkt_type = PACKET_HOST;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return false;

	if (ct->master)
		hash = xt_cluster_hash(ct->master, info);
	else
		hash = xt_cluster_hash(ct, info);

	return !!((1 << hash) & info->node_mask) ^
	       !!(info->flags & XT_CLUSTER_F_INV);
}

static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
	struct xt_cluster_match_info *info = par->matchinfo;
	int ret;

	if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
		pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
				    info->total_nodes, XT_CLUSTER_NODES_MAX);
		return -EINVAL;
	}
	if (info->node_mask >= (1ULL << info->total_nodes)) {
		pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
		return -EDOM;
	}

	ret = nf_ct_netns_get(par->net, par->family);
	if (ret < 0)
		pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
				    par->family);
	return ret;
}

static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
{
	nf_ct_netns_put(par->net, par->family);
}

static struct xt_match xt_cluster_match[] __read_mostly = {
	{
		.name		= "cluster",
		.family		= NFPROTO_IPV4,
		.match		= xt_cluster_mt,
		.checkentry	= xt_cluster_mt_checkentry,
		.matchsize	= sizeof(struct xt_cluster_match_info),
		.destroy	= xt_cluster_mt_destroy,
		.me		= THIS_MODULE,
	},
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
	{
		.name		= "cluster",
		.family		= NFPROTO_IPV6,
		.match		= xt_cluster_mt,
		.checkentry	= xt_cluster_mt_checkentry,
		.matchsize	= sizeof(struct xt_cluster_match_info),
		.destroy	= xt_cluster_mt_destroy,
		.me		= THIS_MODULE,
	},
#endif
};

static int __init xt_cluster_mt_init(void)
{
	return xt_register_matches(xt_cluster_match, ARRAY_SIZE(xt_cluster_match));
}

static void __exit xt_cluster_mt_fini(void)
{
	xt_unregister_matches(xt_cluster_match, ARRAY_SIZE(xt_cluster_match));
}

MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: hash-based cluster match");
MODULE_ALIAS("ipt_cluster");
MODULE_ALIAS("ip6t_cluster");
module_init(xt_cluster_mt_init);
module_exit(xt_cluster_mt_fini);
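/* Standalone sketch of the node-selection arithmetic used by the match
 * above: hash the original source address, scale the hash into a node
 * slot, then test the slot against this node's bitmask. toy_hash() is a
 * placeholder standing in for the kernel's jhash_1word(); the seed, mask
 * and address values are made up for illustration. Compiles as plain
 * userspace C.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_hash(uint32_t ip, uint32_t seed)
{
	return (ip ^ seed) * 2654435761u;	/* stand-in for jhash_1word() */
}

/* same formula as the kernel's reciprocal_scale() helper */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t src_ip = 0xc0a80001;		/* 192.168.0.1 */
	uint32_t seed = 0xdeadbeef;
	uint32_t total_nodes = 4;
	uint32_t node_mask = 1u << 1;		/* this node handles slot 1 */
	uint32_t slot = reciprocal_scale(toy_hash(src_ip, seed), total_nodes);

	printf("flow maps to node %u: %s\n", slot,
	       ((1u << slot) & node_mask) ? "accept" : "drop");
	return 0;
}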
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>

const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_timeout_hook);

static int untimeout(struct nf_conn *ct, void *timeout)
{
	struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);

	if (timeout_ext) {
		const struct nf_ct_timeout *t;

		t = rcu_access_pointer(timeout_ext->timeout);

		if (!timeout || t == timeout)
			RCU_INIT_POINTER(timeout_ext->timeout, NULL);
	}

	/* We are not intended to delete this conntrack. */
	return 0;
}

void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout)
{
	struct nf_ct_iter_data iter_data = {
		.net	= net,
		.data	= timeout,
	};

	nf_ct_iterate_cleanup_net(untimeout, &iter_data);
}
EXPORT_SYMBOL_GPL(nf_ct_untimeout);

static void __nf_ct_timeout_put(struct nf_ct_timeout *timeout)
{
	const struct nf_ct_timeout_hooks *h = rcu_dereference(nf_ct_timeout_hook);

	if (h)
		h->timeout_put(timeout);
}

int nf_ct_set_timeout(struct net *net, struct nf_conn *ct,
		      u8 l3num, u8 l4num, const char *timeout_name)
{
	const struct nf_ct_timeout_hooks *h;
	struct nf_ct_timeout *timeout;
	struct nf_conn_timeout *timeout_ext;
	const char *errmsg = NULL;
	int ret = 0;

	rcu_read_lock();
	h = rcu_dereference(nf_ct_timeout_hook);
	if (!h) {
		ret = -ENOENT;
		errmsg = "Timeout policy base is empty";
		goto out;
	}

	timeout = h->timeout_find_get(net, timeout_name);
	if (!timeout) {
		ret = -ENOENT;
		pr_info_ratelimited("No such timeout policy \"%s\"\n",
				    timeout_name);
		goto out;
	}

	if (timeout->l3num != l3num) {
		ret = -EINVAL;
		pr_info_ratelimited("Timeout policy `%s' can only be used by "
				    "L%d protocol number %d\n",
				    timeout_name, 3, timeout->l3num);
		goto err_put_timeout;
	}
	/* Make sure the timeout policy matches any existing protocol tracker,
	 * otherwise default to generic.
	 */
	if (timeout->l4proto->l4proto != l4num) {
		ret = -EINVAL;
		pr_info_ratelimited("Timeout policy `%s' can only be used by "
				    "L%d protocol number %d\n",
				    timeout_name, 4, timeout->l4proto->l4proto);
		goto err_put_timeout;
	}
	timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
	if (!timeout_ext) {
		ret = -ENOMEM;
		goto err_put_timeout;
	}

	rcu_read_unlock();
	return ret;

err_put_timeout:
	__nf_ct_timeout_put(timeout);
out:
	rcu_read_unlock();
	if (errmsg)
		pr_info_ratelimited("%s\n", errmsg);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_set_timeout);

void nf_ct_destroy_timeout(struct nf_conn *ct)
{
	struct nf_conn_timeout *timeout_ext;
	const struct nf_ct_timeout_hooks *h;

	rcu_read_lock();
	h = rcu_dereference(nf_ct_timeout_hook);

	if (h) {
		timeout_ext = nf_ct_timeout_find(ct);
		if (timeout_ext) {
			struct nf_ct_timeout *t;

			t = rcu_dereference(timeout_ext->timeout);
			if (t)
				h->timeout_put(t);
			RCU_INIT_POINTER(timeout_ext->timeout, NULL);
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_ct_destroy_timeout);
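/* Hedged sketch of an in-kernel caller: attach the timeout policy named
 * "fast-tcp" to a conntrack entry. The wrapper function and the policy
 * name are hypothetical; only nf_ct_set_timeout() itself comes from this
 * file.
 */
static void example_tag_conntrack(struct net *net, struct nf_conn *ct)
{
	int err;

	err = nf_ct_set_timeout(net, ct, NFPROTO_IPV4, IPPROTO_TCP,
				"fast-tcp");
	if (err)
		pr_debug("no custom timeout attached: %d\n", err);
	/* the reference taken here is dropped later via
	 * nf_ct_destroy_timeout() when the conntrack entry dies
	 */
}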
// SPDX-License-Identifier: GPL-2.0
/*
 * thermal_hwmon.c - Generic Thermal Management hwmon support.
 *
 * Code based on Intel thermal_core.c. Copyrights of the original code:
 * Copyright (C) 2008 Intel Corp
 * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
 * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
 *
 * Copyright (C) 2013 Texas Instruments
 * Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/hwmon.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "thermal_hwmon.h"
#include "thermal_core.h"

/* hwmon sys I/F */
/* thermal zone devices with the same type share one hwmon device */
struct thermal_hwmon_device {
	char type[THERMAL_NAME_LENGTH];
	struct device *device;
	int count;
	struct list_head tz_list;
	struct list_head node;
};

struct thermal_hwmon_attr {
	struct device_attribute attr;
	char name[16];
};

/* one temperature input for each thermal zone */
struct thermal_hwmon_temp {
	struct list_head hwmon_node;
	struct thermal_zone_device *tz;
	struct thermal_hwmon_attr temp_input;	/* hwmon sys attr */
	struct thermal_hwmon_attr temp_crit;	/* hwmon sys attr */
};

static LIST_HEAD(thermal_hwmon_list);

static DEFINE_MUTEX(thermal_hwmon_list_lock);

static ssize_t
temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int temperature;
	int ret;
	struct thermal_hwmon_attr *hwmon_attr
			= container_of(attr, struct thermal_hwmon_attr, attr);
	struct thermal_hwmon_temp *temp
			= container_of(hwmon_attr, struct thermal_hwmon_temp,
				       temp_input);
	struct thermal_zone_device *tz = temp->tz;

	ret = thermal_zone_get_temp(tz, &temperature);

	if (ret)
		return ret;

	return sprintf(buf, "%d\n", temperature);
}

static ssize_t
temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct thermal_hwmon_attr *hwmon_attr
			= container_of(attr, struct thermal_hwmon_attr, attr);
	struct thermal_hwmon_temp *temp
			= container_of(hwmon_attr, struct thermal_hwmon_temp,
				       temp_crit);
	struct thermal_zone_device *tz = temp->tz;
	int temperature;
	int ret;

	guard(thermal_zone)(tz);

	ret = tz->ops.get_crit_temp(tz, &temperature);
	if (ret)
		return ret;

	return sprintf(buf, "%d\n", temperature);
}

static struct thermal_hwmon_device *
thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
{
	struct thermal_hwmon_device *hwmon;
	char type[THERMAL_NAME_LENGTH];

	mutex_lock(&thermal_hwmon_list_lock);
	list_for_each_entry(hwmon, &thermal_hwmon_list, node) {
		strscpy(type, tz->type);
		strreplace(type, '-', '_');
		if (!strcmp(hwmon->type, type)) {
			mutex_unlock(&thermal_hwmon_list_lock);
			return hwmon;
		}
	}
	mutex_unlock(&thermal_hwmon_list_lock);

	return NULL;
}

/* Find the temperature input matching a given thermal zone */
static struct thermal_hwmon_temp *
thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
			  const struct thermal_zone_device *tz)
{
	struct thermal_hwmon_temp *temp;

	mutex_lock(&thermal_hwmon_list_lock);
	list_for_each_entry(temp, &hwmon->tz_list, hwmon_node)
		if (temp->tz == tz) {
			mutex_unlock(&thermal_hwmon_list_lock);
			return temp;
		}
	mutex_unlock(&thermal_hwmon_list_lock);

	return NULL;
}

static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
{
	int temp;

	return tz->ops.get_crit_temp && !tz->ops.get_crit_temp(tz, &temp);
}

int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
{
	struct thermal_hwmon_device *hwmon;
	struct thermal_hwmon_temp *temp;
	int new_hwmon_device = 1;
	int result;

	hwmon = thermal_hwmon_lookup_by_type(tz);
	if (hwmon) {
		new_hwmon_device = 0;
		goto register_sys_interface;
	}

	hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
	if (!hwmon)
		return -ENOMEM;

	INIT_LIST_HEAD(&hwmon->tz_list);
	strscpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
	strreplace(hwmon->type, '-', '_');
	hwmon->device = hwmon_device_register_for_thermal(&tz->device,
							  hwmon->type, hwmon);
	if (IS_ERR(hwmon->device)) {
		result = PTR_ERR(hwmon->device);
		goto free_mem;
	}

register_sys_interface:
	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
	if (!temp) {
		result = -ENOMEM;
		goto unregister_name;
	}

	temp->tz = tz;
	hwmon->count++;

	snprintf(temp->temp_input.name, sizeof(temp->temp_input.name),
		 "temp%d_input", hwmon->count);
	temp->temp_input.attr.attr.name = temp->temp_input.name;
	temp->temp_input.attr.attr.mode = 0444;
	temp->temp_input.attr.show = temp_input_show;
	sysfs_attr_init(&temp->temp_input.attr.attr);
	result = device_create_file(hwmon->device, &temp->temp_input.attr);
	if (result)
		goto free_temp_mem;

	if (thermal_zone_crit_temp_valid(tz)) {
		snprintf(temp->temp_crit.name, sizeof(temp->temp_crit.name),
			 "temp%d_crit", hwmon->count);
		temp->temp_crit.attr.attr.name = temp->temp_crit.name;
		temp->temp_crit.attr.attr.mode = 0444;
		temp->temp_crit.attr.show = temp_crit_show;
		sysfs_attr_init(&temp->temp_crit.attr.attr);
		result = device_create_file(hwmon->device,
					    &temp->temp_crit.attr);
		if (result)
			goto unregister_input;
	}

	mutex_lock(&thermal_hwmon_list_lock);
	if (new_hwmon_device)
		list_add_tail(&hwmon->node, &thermal_hwmon_list);
	list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
	mutex_unlock(&thermal_hwmon_list_lock);

	return 0;

unregister_input:
	device_remove_file(hwmon->device, &temp->temp_input.attr);
free_temp_mem:
	kfree(temp);
unregister_name:
	if (new_hwmon_device)
		hwmon_device_unregister(hwmon->device);
free_mem:
	kfree(hwmon);

	return result;
}
EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs);

void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
	struct thermal_hwmon_device *hwmon;
	struct thermal_hwmon_temp *temp;

	hwmon = thermal_hwmon_lookup_by_type(tz);
	if (unlikely(!hwmon)) {
		/* Should never happen... */
		dev_dbg(&tz->device, "hwmon device lookup failed!\n");
		return;
	}

	temp = thermal_hwmon_lookup_temp(hwmon, tz);
	if (unlikely(!temp)) {
		/* Should never happen... */
		dev_dbg(&tz->device, "temperature input lookup failed!\n");
		return;
	}

	device_remove_file(hwmon->device, &temp->temp_input.attr);
	if (thermal_zone_crit_temp_valid(tz))
		device_remove_file(hwmon->device, &temp->temp_crit.attr);

	mutex_lock(&thermal_hwmon_list_lock);
	list_del(&temp->hwmon_node);
	kfree(temp);
	if (!list_empty(&hwmon->tz_list)) {
		mutex_unlock(&thermal_hwmon_list_lock);
		return;
	}
	list_del(&hwmon->node);
	mutex_unlock(&thermal_hwmon_list_lock);

	hwmon_device_unregister(hwmon->device);
	kfree(hwmon);
}
EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);

static void devm_thermal_hwmon_release(struct device *dev, void *res)
{
	thermal_remove_hwmon_sysfs(*(struct thermal_zone_device **)res);
}

int devm_thermal_add_hwmon_sysfs(struct device *dev, struct thermal_zone_device *tz)
{
	struct thermal_zone_device **ptr;
	int ret;

	ptr = devres_alloc(devm_thermal_hwmon_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr) {
		dev_warn(dev, "Failed to allocate device resource data\n");
		return -ENOMEM;
	}

	ret = thermal_add_hwmon_sysfs(tz);
	if (ret) {
		dev_warn(dev, "Failed to add hwmon sysfs attributes\n");
		devres_free(ptr);
		return ret;
	}

	*ptr = tz;
	devres_add(dev, ptr);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_thermal_add_hwmon_sysfs);

MODULE_IMPORT_NS("HWMON_THERMAL");
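/* Hedged sketch of a driver using the devres variant above: register a
 * thermal zone in probe and mirror it into hwmon; devres unwinds both on
 * unbind. my_sensor_probe and my_sensor_thermal_ops are hypothetical.
 */
static int my_sensor_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tz;

	tz = devm_thermal_of_zone_register(&pdev->dev, 0, NULL,
					   &my_sensor_thermal_ops);
	if (IS_ERR(tz))
		return PTR_ERR(tz);

	/* non-fatal: the zone keeps working without the hwmon mirror */
	if (devm_thermal_add_hwmon_sysfs(&pdev->dev, tz))
		dev_dbg(&pdev->dev, "no hwmon mirror for thermal zone\n");

	return 0;
}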
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Spanning tree protocol; timer-related code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/times.h>

#include "br_private.h"
#include "br_private_stp.h"

/* called under bridge lock */
static int br_is_designated_for_some_port(const struct net_bridge *br)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		if (p->state != BR_STATE_DISABLED &&
		    !memcmp(&p->designated_bridge, &br->bridge_id, 8))
			return 1;
	}

	return 0;
}

static void br_hello_timer_expired(struct timer_list *t)
{
	struct net_bridge *br = timer_container_of(br, t, hello_timer);

	br_debug(br, "hello timer expired\n");
	spin_lock(&br->lock);
	if (br->dev->flags & IFF_UP) {
		br_config_bpdu_generation(br);

		if (br->stp_enabled == BR_KERNEL_STP)
			mod_timer(&br->hello_timer,
				  round_jiffies(jiffies + br->hello_time));
	}
	spin_unlock(&br->lock);
}

static void br_message_age_timer_expired(struct timer_list *t)
{
	struct net_bridge_port *p = timer_container_of(p, t, message_age_timer);
	struct net_bridge *br = p->br;
	const bridge_id *id = &p->designated_bridge;
	int was_root;

	if (p->state == BR_STATE_DISABLED)
		return;

	br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
		(unsigned int) p->port_no, p->dev->name,
		id->prio[0], id->prio[1], &id->addr);

	/*
	 * According to the spec, the message age timer cannot be
	 * running when we are the root bridge. So..  this was_root
	 * check is redundant. I'm leaving it in for now, though.
	 */
	spin_lock(&br->lock);
	if (p->state == BR_STATE_DISABLED)
		goto unlock;
	was_root = br_is_root_bridge(br);

	br_become_designated_port(p);
	br_configuration_update(br);
	br_port_state_selection(br);
	if (br_is_root_bridge(br) && !was_root)
		br_become_root_bridge(br);
unlock:
	spin_unlock(&br->lock);
}

static void br_forward_delay_timer_expired(struct timer_list *t)
{
	struct net_bridge_port *p = timer_container_of(p, t,
						       forward_delay_timer);
	struct net_bridge *br = p->br;

	br_debug(br, "port %u(%s) forward delay timer\n",
		 (unsigned int) p->port_no, p->dev->name);
	spin_lock(&br->lock);
	if (p->state == BR_STATE_LISTENING) {
		br_set_state(p, BR_STATE_LEARNING);
		mod_timer(&p->forward_delay_timer,
			  jiffies + br->forward_delay);
	} else if (p->state == BR_STATE_LEARNING) {
		br_set_state(p, BR_STATE_FORWARDING);
		if (br_is_designated_for_some_port(br))
			br_topology_change_detection(br);
		netif_carrier_on(br->dev);
	}
	rcu_read_lock();
	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
	rcu_read_unlock();
	spin_unlock(&br->lock);
}

static void br_tcn_timer_expired(struct timer_list *t)
{
	struct net_bridge *br = timer_container_of(br, t, tcn_timer);

	br_debug(br, "tcn timer expired\n");
	spin_lock(&br->lock);
	if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
		br_transmit_tcn(br);

		mod_timer(&br->tcn_timer, jiffies + br->bridge_hello_time);
	}
	spin_unlock(&br->lock);
}

static void br_topology_change_timer_expired(struct timer_list *t)
{
	struct net_bridge *br = timer_container_of(br, t,
						   topology_change_timer);

	br_debug(br, "topo change timer expired\n");
	spin_lock(&br->lock);
	br->topology_change_detected = 0;
	__br_set_topology_change(br, 0);
	spin_unlock(&br->lock);
}

static void br_hold_timer_expired(struct timer_list *t)
{
	struct net_bridge_port *p = timer_container_of(p, t, hold_timer);

	br_debug(p->br, "port %u(%s) hold timer expired\n",
		 (unsigned int) p->port_no, p->dev->name);

	spin_lock(&p->br->lock);
	if (p->config_pending)
		br_transmit_config(p);
	spin_unlock(&p->br->lock);
}

void br_stp_timer_init(struct net_bridge *br)
{
	timer_setup(&br->hello_timer, br_hello_timer_expired, 0);
	timer_setup(&br->tcn_timer, br_tcn_timer_expired, 0);
	timer_setup(&br->topology_change_timer,
		    br_topology_change_timer_expired, 0);
}

void br_stp_port_timer_init(struct net_bridge_port *p)
{
	timer_setup(&p->message_age_timer, br_message_age_timer_expired, 0);
	timer_setup(&p->forward_delay_timer, br_forward_delay_timer_expired, 0);
	timer_setup(&p->hold_timer, br_hold_timer_expired, 0);
}

/* Report ticks left (in USER_HZ) used for API */
unsigned long br_timer_value(const struct timer_list *timer)
{
	return timer_pending(timer)
		? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0;
}
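/* Minimal sketch of the timer idiom used throughout this file: embed a
 * timer_list in the owning object, then recover the owner inside the
 * callback with timer_container_of(). "struct foo" and its fields are
 * illustrative only.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list my_timer;
	int ticks;
};

static void foo_timer_expired(struct timer_list *t)
{
	/* type-safe container_of() keyed on the timer member's name */
	struct foo *f = timer_container_of(f, t, my_timer);

	f->ticks++;
}

static void foo_init(struct foo *f)
{
	f->ticks = 0;
	timer_setup(&f->my_timer, foo_timer_expired, 0);
	mod_timer(&f->my_timer, jiffies + HZ);	/* fire in ~1 second */
}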
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <linux/shrinker.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "fs.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "messages.h"
#include "super.h"

static struct bio_set btrfs_compressed_bioset;

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
{
	return container_of(bbio, struct compressed_bio, bbio);
}

static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
						   u64 start, blk_opf_t op,
						   btrfs_bio_end_io_t end_io)
{
	struct btrfs_bio *bbio;

	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
					  GFP_NOFS, &btrfs_compressed_bioset));
	btrfs_bio_init(bbio, inode, start, end_io, NULL);
	return to_compressed_bio(bbio);
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}

static int compression_compress_pages(int type, struct list_head *ws,
				      struct btrfs_inode *inode, u64 start,
				      struct folio **folios,
				      unsigned long *out_folios,
				      unsigned long *total_in,
				      unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_folios(ws, inode, start, folios,
					    out_folios, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_folios(ws, inode, start, folios,
					   out_folios, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_folios(ws, inode, start, folios,
					    out_folios, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with remount setting
		 * it to 'no compress', while the caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just need to inform the caller that we
		 * haven't allocated any pages yet.
		 */
		*out_folios = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (cb->compress_type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}
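/*
 * Decompress a single contiguous buffer rather than a whole compressed bio:
 * copy up to @destlen bytes of decompressed data from @data_in (@srclen
 * compressed bytes) into @dest_folio at @dest_pgoff.  As above, the
 * compression type has already been validated by the callers.
 */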
static int compression_decompress(int type, struct list_head *ws,
		const u8 *data_in, struct folio *dest_folio,
		unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_folio,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_folio,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_folio,
						dest_pgoff, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_free_compressed_folios(struct compressed_bio *cb)
{
	for (unsigned int i = 0; i < cb->nr_folios; i++)
		btrfs_free_compr_folio(cb->compressed_folios[i]);
	kfree(cb->compressed_folios);
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

/*
 * Global cache of last unused pages for compression/decompression.
 */
static struct btrfs_compr_pool {
	struct shrinker *shrinker;
	spinlock_t lock;
	struct list_head list;
	int count;
	int thresh;
} compr_pool;

static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
{
	int ret;

	/*
	 * We must not read the values more than once if 'ret' gets expanded in
	 * the return statement so we don't accidentally return a negative
	 * number, even if the first condition finds it positive.
	 */
	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);

	return ret > 0 ? ret : 0;
}

static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
	LIST_HEAD(remove);
	struct list_head *tmp, *next;
	int freed;

	if (compr_pool.count == 0)
		return SHRINK_STOP;

	/* For now, simply drain the whole list. */
	spin_lock(&compr_pool.lock);
	list_splice_init(&compr_pool.list, &remove);
	freed = compr_pool.count;
	compr_pool.count = 0;
	spin_unlock(&compr_pool.lock);

	list_for_each_safe(tmp, next, &remove) {
		struct page *page = list_entry(tmp, struct page, lru);

		ASSERT(page_ref_count(page) == 1);
		put_page(page);
	}

	return freed;
}

/*
 * Common wrappers for page allocation from compression wrappers
 */
struct folio *btrfs_alloc_compr_folio(struct btrfs_fs_info *fs_info)
{
	struct folio *folio = NULL;

	/* For bs > ps cases, no cached folio pool for now. */
	if (fs_info->block_min_order)
		goto alloc;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > 0) {
		folio = list_first_entry(&compr_pool.list, struct folio, lru);
		list_del_init(&folio->lru);
		compr_pool.count--;
	}
	spin_unlock(&compr_pool.lock);

	if (folio)
		return folio;

alloc:
	return folio_alloc(GFP_NOFS, fs_info->block_min_order);
}
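/*
 * Counterpart of btrfs_alloc_compr_folio(): put a folio back into the global
 * pool, or free it immediately when the pool is already at its threshold or
 * when the folio is higher order (the bs > ps case, which is not pooled for
 * now).
 */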
void btrfs_free_compr_folio(struct folio *folio)
{
	bool do_free = false;

	/* The folio is from a bs > ps fs, no cached pool for now. */
	if (folio_order(folio))
		goto free;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > compr_pool.thresh) {
		do_free = true;
	} else {
		list_add(&folio->lru, &compr_pool.list);
		compr_pool.count++;
	}
	spin_unlock(&compr_pool.lock);

	if (!do_free)
		return;

free:
	ASSERT(folio_ref_count(folio) == 1);
	folio_put(folio);
}

static void end_bbio_compressed_read(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	blk_status_t status = bbio->bio.bi_status;

	if (!status)
		status = errno_to_blk_status(btrfs_decompress_bio(cb));

	btrfs_free_compressed_folios(cb);
	btrfs_bio_end_io(cb->orig_bbio, status);
	bio_put(&bbio->bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(const struct compressed_bio *cb)
{
	struct inode *inode = &cb->bbio.inode->vfs_inode;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	pgoff_t index = cb->start >> PAGE_SHIFT;
	const pgoff_t end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	int i;
	int ret;

	ret = blk_status_to_errno(cb->bbio.bio.bi_status);
	if (ret)
		mapping_set_error(inode->i_mapping, ret);

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
				&fbatch);

		if (ret == 0)
			return;

		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			btrfs_folio_clamp_clear_writeback(fs_info, folio,
							  cb->start, cb->len);
		}
		folio_batch_release(&fbatch);
	}
	/* the inode may be gone now */
}

/*
 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_bbio_compressed_write(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);

	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
				    cb->bbio.bio.bi_status == BLK_STS_OK);

	if (cb->writeback)
		end_compressed_writeback(cb);
	/* Note, our inode could be gone now. */

	btrfs_free_compressed_folios(cb);
	bio_put(&cb->bbio.bio);
}
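/*
 * Attach all compressed folios of @cb to its bio, in order, until
 * compressed_len bytes are covered.  The bio was allocated with room for
 * BTRFS_MAX_COMPRESSED_PAGES vecs, so bio_add_folio() should not fail here
 * (see the ASSERT below).
 */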
static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	u32 offset = 0;
	unsigned int findex = 0;

	while (offset < cb->compressed_len) {
		struct folio *folio = cb->compressed_folios[findex];
		u32 len = min_t(u32, cb->compressed_len - offset,
				folio_size(folio));
		int ret;

		/* Maximum compressed extent is smaller than bio size limit. */
		ret = bio_add_folio(bio, folio, len, 0);
		ASSERT(ret);
		offset += len;
		findex++;
	}
}

/*
 * Worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
				   struct folio **compressed_folios,
				   unsigned int nr_folios,
				   blk_opf_t write_flags,
				   bool writeback)
{
	struct btrfs_inode *inode = ordered->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct compressed_bio *cb;

	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));

	cb = alloc_compressed_bio(inode, ordered->file_offset,
				  REQ_OP_WRITE | write_flags,
				  end_bbio_compressed_write);
	cb->start = ordered->file_offset;
	cb->len = ordered->num_bytes;
	cb->compressed_folios = compressed_folios;
	cb->compressed_len = ordered->disk_num_bytes;
	cb->writeback = writeback;
	cb->nr_folios = nr_folios;
	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
	cb->bbio.ordered = ordered;
	btrfs_add_compressed_bio_folios(cb);

	btrfs_submit_bbio(&cb->bbio, 0);
}

/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit a bio for each compressed/regular extent.
 *
 * This means, if several sectors in the same page point to the same
 * on-disk compressed data, we will re-read the same extent many times and
 * this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb,
				     int *memstall, unsigned long *pflags)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	pgoff_t end_index;
	struct bio *orig_bio = &cb->orig_bbio->bio;
	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
	u64 isize = i_size_read(inode);
	int ret;
	struct folio *folio;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	int sectors_missed = 0;

	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means maximum compressed extent size (128K) is just 2x page
	 * size.
	 * This makes readahead less effective, so here disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (fs_info->sectorsize < PAGE_SIZE)
		return 0;

	/* For bs > ps cases, we don't support readahead for compressed folios for now. */
	if (fs_info->block_min_order)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (cur < compressed_end) {
		pgoff_t page_end;
		pgoff_t pg_index = cur >> PAGE_SHIFT;
		u32 add_size;

		if (pg_index > end_index)
			break;

		folio = filemap_get_folio(mapping, pg_index);
		if (!IS_ERR(folio)) {
			u64 folio_sz = folio_size(folio);
			u64 offset = offset_in_folio(folio, cur);

			folio_put(folio);
			sectors_missed += (folio_sz - offset) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to next page start as we already have the page
			 * for the current offset.
			 */
			cur += (folio_sz - offset);
			continue;
		}
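		/*
		 * No folio is cached at this offset yet: allocate one and try
		 * to insert it into the page cache.  __GFP_FS is masked out of
		 * the mapping's gfp mask so the allocation cannot recurse back
		 * into the filesystem.
		 */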
		folio = filemap_alloc_folio(mapping_gfp_constraint(mapping,
								   ~__GFP_FS), 0, NULL);
		if (!folio)
			break;

		if (filemap_add_folio(mapping, folio, pg_index, GFP_NOFS)) {
			/* There is already a page, skip to page end */
			cur += folio_size(folio);
			folio_put(folio);
			continue;
		}

		if (!*memstall && folio_test_workingset(folio)) {
			psi_memstall_enter(pflags);
			*memstall = 1;
		}

		ret = set_folio_extent_mapped(folio);
		if (ret < 0) {
			folio_unlock(folio);
			folio_put(folio);
			break;
		}

		page_end = (pg_index << PAGE_SHIFT) + folio_size(folio) - 1;
		btrfs_lock_extent(tree, cur, page_end, NULL);
		read_lock(&em_tree->lock);
		em = btrfs_lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > btrfs_extent_map_end(em)) ||
		    (btrfs_extent_map_block_start(em) >> SECTOR_SHIFT) !=
		    orig_bio->bi_iter.bi_sector) {
			btrfs_free_extent_map(em);
			btrfs_unlock_extent(tree, cur, page_end, NULL);
			folio_unlock(folio);
			folio_put(folio);
			break;
		}
		add_size = min(em->start + em->len, page_end + 1) - cur;
		btrfs_free_extent_map(em);
		btrfs_unlock_extent(tree, cur, page_end, NULL);

		if (folio_contains(folio, end_index)) {
			size_t zero_offset = offset_in_folio(folio, isize);

			if (zero_offset) {
				int zeros;

				zeros = folio_size(folio) - zero_offset;
				folio_zero_range(folio, zero_offset, zeros);
			}
		}

		if (!bio_add_folio(orig_bio, folio, add_size,
				   offset_in_folio(folio, cur))) {
			folio_unlock(folio);
			folio_put(folio);
			break;
		}

		/*
		 * If it's subpage, we also need to increase its
		 * subpage::readers number, as at endio time we will decrease
		 * subpage::readers and unlock the page.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_folio_set_lock(fs_info, folio, cur, add_size);

		folio_put(folio);
		cur += add_size;
	}
	return 0;
}

/*
 * For a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io callbacks
 */
void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	u64 file_offset = bbio->file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	unsigned long pflags;
	int memstall = 0;
	blk_status_t status;
	int ret;

	/* we need the actual starting offset of this extent in the file */
	read_lo