Total coverage: 378845 (19%) of 2091607
drivers/infiniband/core/counters.c:

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
 */

[Annotated source listing of the RDMA counter core: counter mode configuration (rdma_counter_set_auto_mode(), __counter_set_mode()), counter allocation and QP binding (alloc_and_bind(), rdma_counter_bind_qp_auto(), rdma_counter_bind_qpn(), rdma_counter_bind_qpn_alloc(), rdma_counter_unbind_qpn()), statistics queries (rdma_counter_query_stats(), rdma_counter_get_hwstat_value()), and per-port init/teardown (rdma_counter_init(), rdma_counter_release()). Per-line hit counts omitted.]
net/ipv6/ip6_fib.c:

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux INET6 implementation
 * Forwarding Information Database
 *
 * Authors:
 *	Pedro Roque	<roque@di.fc.ul.pt>
 *
 * Changes:
 *	Yuji SEKIYA @USAGI:	Support default route on router node;
 *				remove ip6_null_entry from the top of
 *				routing table.
 *	Ville Nuorvala:		Fixed routing subtrees.
 */

[Annotated source listing of the IPv6 forwarding information database: route serial numbers and tree walkers (fib6_new_sernum(), fib6_walker_link()), fib6_info allocation and release (fib6_info_alloc(), fib6_info_destroy_rcu()), table management (fib6_new_table(), fib6_get_table(), fib6_tables_init()), notifier and netlink dump paths (fib6_tables_dump(), inet6_dump_fib(), fib6_dump_table()), and radix-tree insertion (fib6_add_1(), fib6_add_rt2node(), fib6_add()). Per-line hit counts omitted; the listing is cut off partway through fib6_add().]
*/ node_free_immediate(info->nl_net, sfn); err = PTR_ERR(sn); goto failure; } /* Now link new subtree to main tree */ rcu_assign_pointer(sfn->parent, fn); rcu_assign_pointer(fn->subtree, sfn); } else { sn = fib6_add_1(info->nl_net, table, FIB6_SUBTREE(fn), &rt->fib6_src.addr, rt->fib6_src.plen, offsetof(struct fib6_info, fib6_src), allow_create, replace_required, extack); if (IS_ERR(sn)) { err = PTR_ERR(sn); goto failure; } } if (!rcu_access_pointer(fn->leaf)) { if (fn->fn_flags & RTN_TL_ROOT) { /* put back null_entry for root node */ rcu_assign_pointer(fn->leaf, info->nl_net->ipv6.fib6_null_entry); } else { fib6_info_hold(rt); rcu_assign_pointer(fn->leaf, rt); } } fn = sn; } #endif if (rt->nh) err = fib6_add_rt2node_nh(fn, rt, info, extack, &purge_list); else err = fib6_add_rt2node(fn, rt, info, extack, &purge_list); if (!err) { struct fib6_info *iter, *next; list_for_each_entry_safe(iter, next, &purge_list, purge_link) { list_del(&iter->purge_link); fib6_purge_rt(iter, fn, info->nl_net); fib6_info_release(iter); } __fib6_update_sernum_upto_root(rt, fib6_new_sernum(info->nl_net)); if (rt->fib6_flags & RTF_EXPIRES) fib6_add_gc_list(rt); fib6_start_gc(info->nl_net, rt); } out: if (err) { #ifdef CONFIG_IPV6_SUBTREES /* * If fib6_add_1 has cleared the old leaf pointer in the * super-tree leaf node we have to find a new one for it. */ if (pn != fn) { struct fib6_info *pn_leaf = rcu_dereference_protected(pn->leaf, lockdep_is_held(&table->tb6_lock)); if (pn_leaf == rt) { pn_leaf = NULL; RCU_INIT_POINTER(pn->leaf, NULL); fib6_info_release(rt); } if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) { pn_leaf = fib6_find_prefix(info->nl_net, table, pn); if (!pn_leaf) pn_leaf = info->nl_net->ipv6.fib6_null_entry; fib6_info_hold(pn_leaf); rcu_assign_pointer(pn->leaf, pn_leaf); } } #endif goto failure; } else if (fib6_requires_src(rt)) { fib6_routes_require_src_inc(info->nl_net); } return err; failure: /* fn->leaf could be NULL and fib6_repair_tree() needs to be called if: * 1. fn is an intermediate node and we failed to add the new * route to it in both subtree creation failure and fib6_add_rt2node() * failure case. * 2. fn is the root node in the table and we fail to add the first * default route to it. */ if (fn && (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) || (fn->fn_flags & RTN_TL_ROOT && !rcu_access_pointer(fn->leaf)))) fib6_repair_tree(info->nl_net, table, fn); return err; } /* * Routing tree lookup * */ struct lookup_args { int offset; /* key offset on fib6_info */ const struct in6_addr *addr; /* search key */ }; static struct fib6_node *fib6_node_lookup_1(struct fib6_node *root, struct lookup_args *args) { struct fib6_node *fn; __be32 dir; if (unlikely(args->offset == 0)) return NULL; /* * Descend on a tree */ fn = root; for (;;) { struct fib6_node *next; dir = addr_bit_set(args->addr, fn->fn_bit); next = dir ? 
rcu_dereference(fn->right) : rcu_dereference(fn->left); if (next) { fn = next; continue; } break; } while (fn) { struct fib6_node *subtree = FIB6_SUBTREE(fn); if (subtree || fn->fn_flags & RTN_RTINFO) { struct fib6_info *leaf = rcu_dereference(fn->leaf); struct rt6key *key; if (!leaf) goto backtrack; key = (struct rt6key *) ((u8 *)leaf + args->offset); if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { #ifdef CONFIG_IPV6_SUBTREES if (subtree) { struct fib6_node *sfn; sfn = fib6_node_lookup_1(subtree, args + 1); if (!sfn) goto backtrack; fn = sfn; } #endif if (fn->fn_flags & RTN_RTINFO) return fn; } } backtrack: if (fn->fn_flags & RTN_ROOT) break; fn = rcu_dereference(fn->parent); } return NULL; } /* called with rcu_read_lock() held */ struct fib6_node *fib6_node_lookup(struct fib6_node *root, const struct in6_addr *daddr, const struct in6_addr *saddr) { struct fib6_node *fn; struct lookup_args args[] = { { .offset = offsetof(struct fib6_info, fib6_dst), .addr = daddr, }, #ifdef CONFIG_IPV6_SUBTREES { .offset = offsetof(struct fib6_info, fib6_src), .addr = saddr, }, #endif { .offset = 0, /* sentinel */ } }; fn = fib6_node_lookup_1(root, daddr ? args : args + 1); if (!fn || fn->fn_flags & RTN_TL_ROOT) fn = root; return fn; } /* * Get node with specified destination prefix (and source prefix, * if subtrees are used) * exact_match == true means we try to find fn with exact match of * the passed in prefix addr * exact_match == false means we try to find fn with longest prefix * match of the passed in prefix addr. This is useful for finding fn * for cached route as it will be stored in the exception table under * the node with longest prefix length. */ static struct fib6_node *fib6_locate_1(struct fib6_node *root, const struct in6_addr *addr, int plen, int offset, bool exact_match) { struct fib6_node *fn, *prev = NULL; for (fn = root; fn ; ) { struct fib6_info *leaf = rcu_dereference(fn->leaf); struct rt6key *key; /* This node is being deleted */ if (!leaf) { if (plen <= fn->fn_bit) goto out; else goto next; } key = (struct rt6key *)((u8 *)leaf + offset); /* * Prefix match */ if (plen < fn->fn_bit || !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) goto out; if (plen == fn->fn_bit) return fn; if (fn->fn_flags & RTN_RTINFO) prev = fn; next: /* * We have more bits to go */ if (addr_bit_set(addr, fn->fn_bit)) fn = rcu_dereference(fn->right); else fn = rcu_dereference(fn->left); } out: if (exact_match) return NULL; else return prev; } struct fib6_node *fib6_locate(struct fib6_node *root, const struct in6_addr *daddr, int dst_len, const struct in6_addr *saddr, int src_len, bool exact_match) { struct fib6_node *fn; fn = fib6_locate_1(root, daddr, dst_len, offsetof(struct fib6_info, fib6_dst), exact_match); #ifdef CONFIG_IPV6_SUBTREES if (src_len) { WARN_ON(saddr == NULL); if (fn) { struct fib6_node *subtree = FIB6_SUBTREE(fn); if (subtree) { fn = fib6_locate_1(subtree, saddr, src_len, offsetof(struct fib6_info, fib6_src), exact_match); } } } #endif if (fn && fn->fn_flags & RTN_RTINFO) return fn; return NULL; } /* * Deletion * */ static struct fib6_info *fib6_find_prefix(struct net *net, struct fib6_table *table, struct fib6_node *fn) { struct fib6_node *child_left, *child_right; if (fn->fn_flags & RTN_ROOT) return net->ipv6.fib6_null_entry; while (fn) { child_left = rcu_dereference_protected(fn->left, lockdep_is_held(&table->tb6_lock)); child_right = rcu_dereference_protected(fn->right, lockdep_is_held(&table->tb6_lock)); if (child_left) return rcu_dereference_protected(child_left->leaf, 
lockdep_is_held(&table->tb6_lock)); if (child_right) return rcu_dereference_protected(child_right->leaf, lockdep_is_held(&table->tb6_lock)); fn = FIB6_SUBTREE(fn); } return NULL; } /* * Called to trim the tree of intermediate nodes when possible. "fn" * is the node we want to try and remove. * Need to own table->tb6_lock */ static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_table *table, struct fib6_node *fn) { int children; int nstate; struct fib6_node *child; struct fib6_walker *w; int iter = 0; /* Set fn->leaf to null_entry for root node. */ if (fn->fn_flags & RTN_TL_ROOT) { rcu_assign_pointer(fn->leaf, net->ipv6.fib6_null_entry); return fn; } for (;;) { struct fib6_node *fn_r = rcu_dereference_protected(fn->right, lockdep_is_held(&table->tb6_lock)); struct fib6_node *fn_l = rcu_dereference_protected(fn->left, lockdep_is_held(&table->tb6_lock)); struct fib6_node *pn = rcu_dereference_protected(fn->parent, lockdep_is_held(&table->tb6_lock)); struct fib6_node *pn_r = rcu_dereference_protected(pn->right, lockdep_is_held(&table->tb6_lock)); struct fib6_node *pn_l = rcu_dereference_protected(pn->left, lockdep_is_held(&table->tb6_lock)); struct fib6_info *fn_leaf = rcu_dereference_protected(fn->leaf, lockdep_is_held(&table->tb6_lock)); struct fib6_info *pn_leaf = rcu_dereference_protected(pn->leaf, lockdep_is_held(&table->tb6_lock)); struct fib6_info *new_fn_leaf; pr_debug("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter); iter++; WARN_ON(fn->fn_flags & RTN_RTINFO); WARN_ON(fn->fn_flags & RTN_TL_ROOT); WARN_ON(fn_leaf); children = 0; child = NULL; if (fn_r) { child = fn_r; children |= 1; } if (fn_l) { child = fn_l; children |= 2; } if (children == 3 || FIB6_SUBTREE(fn) #ifdef CONFIG_IPV6_SUBTREES /* Subtree root (i.e. fn) may have one child */ || (children && fn->fn_flags & RTN_ROOT) #endif ) { new_fn_leaf = fib6_find_prefix(net, table, fn); #if RT6_DEBUG >= 2 if (!new_fn_leaf) { WARN_ON(!new_fn_leaf); new_fn_leaf = net->ipv6.fib6_null_entry; } #endif fib6_info_hold(new_fn_leaf); rcu_assign_pointer(fn->leaf, new_fn_leaf); return pn; } #ifdef CONFIG_IPV6_SUBTREES if (FIB6_SUBTREE(pn) == fn) { WARN_ON(!(fn->fn_flags & RTN_ROOT)); RCU_INIT_POINTER(pn->subtree, NULL); nstate = FWS_L; } else { WARN_ON(fn->fn_flags & RTN_ROOT); #endif if (pn_r == fn) rcu_assign_pointer(pn->right, child); else if (pn_l == fn) rcu_assign_pointer(pn->left, child); #if RT6_DEBUG >= 2 else WARN_ON(1); #endif if (child) rcu_assign_pointer(child->parent, pn); nstate = FWS_R; #ifdef CONFIG_IPV6_SUBTREES } #endif read_lock(&net->ipv6.fib6_walker_lock); FOR_WALKERS(net, w) { if (!child) { if (w->node == fn) { pr_debug("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate); w->node = pn; w->state = nstate; } } else { if (w->node == fn) { w->node = child; if (children&2) { pr_debug("W %p adjusted by delnode 2, s=%d\n", w, w->state); w->state = w->state >= FWS_R ? FWS_U : FWS_INIT; } else { pr_debug("W %p adjusted by delnode 2, s=%d\n", w, w->state); w->state = w->state >= FWS_C ? 
FWS_U : FWS_INIT; } } } } read_unlock(&net->ipv6.fib6_walker_lock); node_free(net, fn); if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn)) return pn; RCU_INIT_POINTER(pn->leaf, NULL); fib6_info_release(pn_leaf); fn = pn; } } static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, struct fib6_info __rcu **rtp, struct nl_info *info) { struct fib6_info *leaf, *replace_rt = NULL; struct fib6_walker *w; struct fib6_info *rt = rcu_dereference_protected(*rtp, lockdep_is_held(&table->tb6_lock)); struct net *net = info->nl_net; bool notify_del = false; /* If the deleted route is the first in the node and it is not part of * a multipath route, then we need to replace it with the next route * in the node, if exists. */ leaf = rcu_dereference_protected(fn->leaf, lockdep_is_held(&table->tb6_lock)); if (leaf == rt && !rt->fib6_nsiblings) { if (rcu_access_pointer(rt->fib6_next)) replace_rt = rcu_dereference_protected(rt->fib6_next, lockdep_is_held(&table->tb6_lock)); else notify_del = true; } /* Unlink it */ *rtp = rt->fib6_next; rt->fib6_node = NULL; net->ipv6.rt6_stats->fib_rt_entries--; net->ipv6.rt6_stats->fib_discarded_routes++; /* Reset round-robin state, if necessary */ if (rcu_access_pointer(fn->rr_ptr) == rt) fn->rr_ptr = NULL; /* Remove this entry from other siblings */ if (rt->fib6_nsiblings) { struct fib6_info *sibling, *next_sibling; /* The route is deleted from a multipath route. If this * multipath route is the first route in the node, then we need * to emit a delete notification. Otherwise, we need to skip * the notification. */ if (rt->fib6_metric == leaf->fib6_metric && rt6_qualify_for_ecmp(leaf)) notify_del = true; list_for_each_entry_safe(sibling, next_sibling, &rt->fib6_siblings, fib6_siblings) WRITE_ONCE(sibling->fib6_nsiblings, sibling->fib6_nsiblings - 1); WRITE_ONCE(rt->fib6_nsiblings, 0); list_del_rcu(&rt->fib6_siblings); rt6_multipath_rebalance(next_sibling); } /* Adjust walkers */ read_lock(&net->ipv6.fib6_walker_lock); FOR_WALKERS(net, w) { if (w->state == FWS_C && w->leaf == rt) { pr_debug("walker %p adjusted by delroute\n", w); w->leaf = rcu_dereference_protected(rt->fib6_next, lockdep_is_held(&table->tb6_lock)); if (!w->leaf) w->state = FWS_U; } } read_unlock(&net->ipv6.fib6_walker_lock); /* If it was last route, call fib6_repair_tree() to: * 1. For root node, put back null_entry as how the table was created. * 2. For other nodes, expunge its radix tree node. 
*/ if (!rcu_access_pointer(fn->leaf)) { if (!(fn->fn_flags & RTN_TL_ROOT)) { fn->fn_flags &= ~RTN_RTINFO; net->ipv6.rt6_stats->fib_route_nodes--; } fn = fib6_repair_tree(net, table, fn); } fib6_purge_rt(rt, fn, net); if (!info->skip_notify_kernel) { if (notify_del) call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, rt, NULL); else if (replace_rt) call_fib6_entry_notifiers_replace(net, replace_rt); } if (!info->skip_notify) inet6_rt_notify(RTM_DELROUTE, rt, info, 0); fib6_info_release(rt); } /* Need to own table->tb6_lock */ int fib6_del(struct fib6_info *rt, struct nl_info *info) { struct net *net = info->nl_net; struct fib6_info __rcu **rtp; struct fib6_info __rcu **rtp_next; struct fib6_table *table; struct fib6_node *fn; if (rt == net->ipv6.fib6_null_entry) return -ENOENT; table = rt->fib6_table; fn = rcu_dereference_protected(rt->fib6_node, lockdep_is_held(&table->tb6_lock)); if (!fn) return -ENOENT; WARN_ON(!(fn->fn_flags & RTN_RTINFO)); /* * Walk the leaf entries looking for ourself */ for (rtp = &fn->leaf; *rtp; rtp = rtp_next) { struct fib6_info *cur = rcu_dereference_protected(*rtp, lockdep_is_held(&table->tb6_lock)); if (rt == cur) { if (fib6_requires_src(cur)) fib6_routes_require_src_dec(info->nl_net); fib6_del_route(table, fn, rtp, info); return 0; } rtp_next = &cur->fib6_next; } return -ENOENT; } /* * Tree traversal function. * * Certainly, it is not interrupt safe. * However, it is internally reenterable wrt itself and fib6_add/fib6_del. * It means, that we can modify tree during walking * and use this function for garbage collection, clone pruning, * cleaning tree when a device goes down etc. etc. * * It guarantees that every node will be traversed, * and that it will be traversed only once. * * Callback function w->func may return: * 0 -> continue walking. * positive value -> walking is suspended (used by tree dumps, * and probably by gc, if it will be split to several slices) * negative value -> terminate walking. * * The function itself returns: * 0 -> walk is complete. * >0 -> walk is incomplete (i.e. suspended) * <0 -> walk is terminated by an error. * * This function is called with tb6_lock held. 
*/ static int fib6_walk_continue(struct fib6_walker *w) { struct fib6_node *fn, *pn, *left, *right; /* w->root should always be table->tb6_root */ WARN_ON_ONCE(!(w->root->fn_flags & RTN_TL_ROOT)); for (;;) { fn = w->node; if (!fn) return 0; switch (w->state) { #ifdef CONFIG_IPV6_SUBTREES case FWS_S: if (FIB6_SUBTREE(fn)) { w->node = FIB6_SUBTREE(fn); continue; } w->state = FWS_L; fallthrough; #endif case FWS_L: left = rcu_dereference_protected(fn->left, 1); if (left) { w->node = left; w->state = FWS_INIT; continue; } w->state = FWS_R; fallthrough; case FWS_R: right = rcu_dereference_protected(fn->right, 1); if (right) { w->node = right; w->state = FWS_INIT; continue; } w->state = FWS_C; w->leaf = rcu_dereference_protected(fn->leaf, 1); fallthrough; case FWS_C: if (w->leaf && fn->fn_flags & RTN_RTINFO) { int err; if (w->skip) { w->skip--; goto skip; } err = w->func(w); if (err) return err; w->count++; continue; } skip: w->state = FWS_U; fallthrough; case FWS_U: if (fn == w->root) return 0; pn = rcu_dereference_protected(fn->parent, 1); left = rcu_dereference_protected(pn->left, 1); right = rcu_dereference_protected(pn->right, 1); w->node = pn; #ifdef CONFIG_IPV6_SUBTREES if (FIB6_SUBTREE(pn) == fn) { WARN_ON(!(fn->fn_flags & RTN_ROOT)); w->state = FWS_L; continue; } #endif if (left == fn) { w->state = FWS_R; continue; } if (right == fn) { w->state = FWS_C; w->leaf = rcu_dereference_protected(w->node->leaf, 1); continue; } #if RT6_DEBUG >= 2 WARN_ON(1); #endif } } } static int fib6_walk(struct net *net, struct fib6_walker *w) { int res; w->state = FWS_INIT; w->node = w->root; fib6_walker_link(net, w); res = fib6_walk_continue(w); if (res <= 0) fib6_walker_unlink(net, w); return res; } static int fib6_clean_node(struct fib6_walker *w) { int res; struct fib6_info *rt; struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w); struct nl_info info = { .nl_net = c->net, .skip_notify = c->skip_notify, }; if (c->sernum != FIB6_NO_SERNUM_CHANGE && READ_ONCE(w->node->fn_sernum) != c->sernum) WRITE_ONCE(w->node->fn_sernum, c->sernum); if (!c->func) { WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE); w->leaf = NULL; return 0; } for_each_fib6_walker_rt(w) { res = c->func(rt, c->arg); if (res == -1) { w->leaf = rt; res = fib6_del(rt, &info); if (res) { #if RT6_DEBUG >= 2 pr_debug("%s: del failed: rt=%p@%p err=%d\n", __func__, rt, rcu_access_pointer(rt->fib6_node), res); #endif continue; } return 0; } else if (res == -2) { if (WARN_ON(!rt->fib6_nsiblings)) continue; rt = list_last_entry(&rt->fib6_siblings, struct fib6_info, fib6_siblings); continue; } WARN_ON(res != 0); } w->leaf = rt; return 0; } /* * Convenient frontend to tree walker. * * func is called on each route. * It may return -2 -> skip multipath route. * -1 -> delete this route. 
* 0 -> continue walking */ static void fib6_clean_tree(struct net *net, struct fib6_node *root, int (*func)(struct fib6_info *, void *arg), int sernum, void *arg, bool skip_notify) { struct fib6_cleaner c; c.w.root = root; c.w.func = fib6_clean_node; c.w.count = 0; c.w.skip = 0; c.w.skip_in_node = 0; c.func = func; c.sernum = sernum; c.arg = arg; c.net = net; c.skip_notify = skip_notify; fib6_walk(net, &c.w); } static void __fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *), int sernum, void *arg, bool skip_notify) { struct fib6_table *table; struct hlist_head *head; unsigned int h; rcu_read_lock(); for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { head = &net->ipv6.fib_table_hash[h]; hlist_for_each_entry_rcu(table, head, tb6_hlist) { spin_lock_bh(&table->tb6_lock); fib6_clean_tree(net, &table->tb6_root, func, sernum, arg, skip_notify); spin_unlock_bh(&table->tb6_lock); } } rcu_read_unlock(); } void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *), void *arg) { __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg, false); } void fib6_clean_all_skip_notify(struct net *net, int (*func)(struct fib6_info *, void *), void *arg) { __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg, true); } static void fib6_flush_trees(struct net *net) { int new_sernum = fib6_new_sernum(net); __fib6_clean_all(net, NULL, new_sernum, NULL, false); } /* * Garbage collection */ void fib6_age_exceptions(struct fib6_info *rt, struct fib6_gc_args *gc_args, unsigned long now) { bool may_expire = rt->fib6_flags & RTF_EXPIRES && rt->expires; int old_more = gc_args->more; rt6_age_exceptions(rt, gc_args, now); if (!may_expire && old_more == gc_args->more) fib6_remove_gc_list(rt); } static int fib6_age(struct fib6_info *rt, struct fib6_gc_args *gc_args) { unsigned long now = jiffies; /* * check addrconf expiration here. * Routes are expired even if they are in use. */ if (rt->fib6_flags & RTF_EXPIRES && rt->expires) { if (time_after(now, rt->expires)) { pr_debug("expiring %p\n", rt); return -1; } gc_args->more++; } /* Also age clones in the exception table. * Note, that clones are aged out * only if they are not in use now. */ fib6_age_exceptions(rt, gc_args, now); return 0; } static void fib6_gc_table(struct net *net, struct fib6_table *tb6, struct fib6_gc_args *gc_args) { struct fib6_info *rt; struct hlist_node *n; struct nl_info info = { .nl_net = net, .skip_notify = false, }; hlist_for_each_entry_safe(rt, n, &tb6->tb6_gc_hlist, gc_link) if (fib6_age(rt, gc_args) == -1) fib6_del(rt, &info); } static void fib6_gc_all(struct net *net, struct fib6_gc_args *gc_args) { struct fib6_table *table; struct hlist_head *head; unsigned int h; rcu_read_lock(); for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { head = &net->ipv6.fib_table_hash[h]; hlist_for_each_entry_rcu(table, head, tb6_hlist) { spin_lock_bh(&table->tb6_lock); fib6_gc_table(net, table, gc_args); spin_unlock_bh(&table->tb6_lock); } } rcu_read_unlock(); } void fib6_run_gc(unsigned long expires, struct net *net, bool force) { struct fib6_gc_args gc_args; int ip6_rt_gc_interval; unsigned long now; if (force) { spin_lock_bh(&net->ipv6.fib6_gc_lock); } else if (!spin_trylock_bh(&net->ipv6.fib6_gc_lock)) { mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ); return; } ip6_rt_gc_interval = READ_ONCE(net->ipv6.sysctl.ip6_rt_gc_interval); gc_args.timeout = expires ? 
(int)expires : ip6_rt_gc_interval; gc_args.more = 0; fib6_gc_all(net, &gc_args); now = jiffies; net->ipv6.ip6_rt_last_gc = now; if (gc_args.more) mod_timer(&net->ipv6.ip6_fib_timer, round_jiffies(now + ip6_rt_gc_interval)); else timer_delete(&net->ipv6.ip6_fib_timer); spin_unlock_bh(&net->ipv6.fib6_gc_lock); } static void fib6_gc_timer_cb(struct timer_list *t) { struct net *arg = timer_container_of(arg, t, ipv6.ip6_fib_timer); fib6_run_gc(0, arg, true); } static int __net_init fib6_net_init(struct net *net) { size_t size = sizeof(struct hlist_head) * FIB6_TABLE_HASHSZ; int err; err = fib6_notifier_init(net); if (err) return err; /* Default to 3-tuple */ net->ipv6.sysctl.multipath_hash_fields = FIB_MULTIPATH_HASH_FIELD_DEFAULT_MASK; spin_lock_init(&net->ipv6.fib6_gc_lock); rwlock_init(&net->ipv6.fib6_walker_lock); INIT_LIST_HEAD(&net->ipv6.fib6_walkers); timer_setup(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, 0); net->ipv6.rt6_stats = kzalloc_obj(*net->ipv6.rt6_stats); if (!net->ipv6.rt6_stats) goto out_notifier; /* Avoid false sharing : Use at least a full cache line */ size = max_t(size_t, size, L1_CACHE_BYTES); net->ipv6.fib_table_hash = kzalloc(size, GFP_KERNEL); if (!net->ipv6.fib_table_hash) goto out_rt6_stats; spin_lock_init(&net->ipv6.fib_table_hash_lock); net->ipv6.fib6_main_tbl = kzalloc_obj(*net->ipv6.fib6_main_tbl); if (!net->ipv6.fib6_main_tbl) goto out_fib_table_hash; net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN; rcu_assign_pointer(net->ipv6.fib6_main_tbl->tb6_root.leaf, net->ipv6.fib6_null_entry); net->ipv6.fib6_main_tbl->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers); INIT_HLIST_HEAD(&net->ipv6.fib6_main_tbl->tb6_gc_hlist); #ifdef CONFIG_IPV6_MULTIPLE_TABLES net->ipv6.fib6_local_tbl = kzalloc_obj(*net->ipv6.fib6_local_tbl); if (!net->ipv6.fib6_local_tbl) goto out_fib6_main_tbl; net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL; rcu_assign_pointer(net->ipv6.fib6_local_tbl->tb6_root.leaf, net->ipv6.fib6_null_entry); net->ipv6.fib6_local_tbl->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO; inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers); INIT_HLIST_HEAD(&net->ipv6.fib6_local_tbl->tb6_gc_hlist); #endif fib6_tables_init(net); return 0; #ifdef CONFIG_IPV6_MULTIPLE_TABLES out_fib6_main_tbl: kfree(net->ipv6.fib6_main_tbl); #endif out_fib_table_hash: kfree(net->ipv6.fib_table_hash); out_rt6_stats: kfree(net->ipv6.rt6_stats); out_notifier: fib6_notifier_exit(net); return -ENOMEM; } static void fib6_net_exit(struct net *net) { unsigned int i; timer_delete_sync(&net->ipv6.ip6_fib_timer); for (i = 0; i < FIB6_TABLE_HASHSZ; i++) { struct hlist_head *head = &net->ipv6.fib_table_hash[i]; struct hlist_node *tmp; struct fib6_table *tb; hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) { hlist_del(&tb->tb6_hlist); fib6_free_table(tb); } } kfree(net->ipv6.fib_table_hash); kfree(net->ipv6.rt6_stats); fib6_notifier_exit(net); } static struct pernet_operations fib6_net_ops = { .init = fib6_net_init, .exit = fib6_net_exit, }; static const struct rtnl_msg_handler fib6_rtnl_msg_handlers[] __initconst_or_module = { {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE, .dumpit = inet6_dump_fib, .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE}, }; int __init fib6_init(void) { int ret = -ENOMEM; fib6_node_kmem = KMEM_CACHE(fib6_node, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT); if (!fib6_node_kmem) goto out; ret = register_pernet_subsys(&fib6_net_ops); if (ret) goto out_kmem_cache_create; 
ret = rtnl_register_many(fib6_rtnl_msg_handlers); if (ret) goto out_unregister_subsys; __fib6_flush_trees = fib6_flush_trees; out: return ret; out_unregister_subsys: unregister_pernet_subsys(&fib6_net_ops); out_kmem_cache_create: kmem_cache_destroy(fib6_node_kmem); goto out; } void fib6_gc_cleanup(void) { unregister_pernet_subsys(&fib6_net_ops); kmem_cache_destroy(fib6_node_kmem); } #ifdef CONFIG_PROC_FS static int ipv6_route_native_seq_show(struct seq_file *seq, void *v) { struct fib6_info *rt = v; struct ipv6_route_iter *iter = seq->private; struct fib6_nh *fib6_nh = rt->fib6_nh; unsigned int flags = rt->fib6_flags; const struct net_device *dev; if (rt->nh) fib6_nh = nexthop_fib6_nh(rt->nh); seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen); #ifdef CONFIG_IPV6_SUBTREES seq_printf(seq, "%pi6 %02x ", &rt->fib6_src.addr, rt->fib6_src.plen); #else seq_puts(seq, "00000000000000000000000000000000 00 "); #endif if (fib6_nh->fib_nh_gw_family) { flags |= RTF_GATEWAY; seq_printf(seq, "%pi6", &fib6_nh->fib_nh_gw6); } else { seq_puts(seq, "00000000000000000000000000000000"); } dev = fib6_nh->fib_nh_dev; seq_printf(seq, " %08x %08x %08x %08x %8s\n", rt->fib6_metric, refcount_read(&rt->fib6_ref), 0, flags, dev ? dev->name : ""); iter->w.leaf = NULL; return 0; } static int ipv6_route_yield(struct fib6_walker *w) { struct ipv6_route_iter *iter = w->args; if (!iter->skip) return 1; do { iter->w.leaf = rcu_dereference_protected( iter->w.leaf->fib6_next, lockdep_is_held(&iter->tbl->tb6_lock)); iter->skip--; if (!iter->skip && iter->w.leaf) return 1; } while (iter->w.leaf); return 0; } static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter, struct net *net) { memset(&iter->w, 0, sizeof(iter->w)); iter->w.func = ipv6_route_yield; iter->w.root = &iter->tbl->tb6_root; iter->w.state = FWS_INIT; iter->w.node = iter->w.root; iter->w.args = iter; iter->sernum = READ_ONCE(iter->w.root->fn_sernum); INIT_LIST_HEAD(&iter->w.lh); fib6_walker_link(net, &iter->w); } static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl, struct net *net) { unsigned int h; struct hlist_node *node; if (tbl) { h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1; node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist)); } else { h = 0; node = NULL; } while (!node && h < FIB6_TABLE_HASHSZ) { node = rcu_dereference( hlist_first_rcu(&net->ipv6.fib_table_hash[h++])); } return hlist_entry_safe(node, struct fib6_table, tb6_hlist); } static void ipv6_route_check_sernum(struct ipv6_route_iter *iter) { int sernum = READ_ONCE(iter->w.root->fn_sernum); if (iter->sernum != sernum) { iter->sernum = sernum; iter->w.state = FWS_INIT; iter->w.node = iter->w.root; WARN_ON(iter->w.skip); iter->w.skip = iter->w.count; } } static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) { int r; struct fib6_info *n; struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; ++(*pos); if (!v) goto iter_table; n = rcu_dereference(((struct fib6_info *)v)->fib6_next); if (n) return n; iter_table: ipv6_route_check_sernum(iter); spin_lock_bh(&iter->tbl->tb6_lock); r = fib6_walk_continue(&iter->w); spin_unlock_bh(&iter->tbl->tb6_lock); if (r > 0) { return iter->w.leaf; } else if (r < 0) { fib6_walker_unlink(net, &iter->w); return NULL; } fib6_walker_unlink(net, &iter->w); iter->tbl = ipv6_route_seq_next_table(iter->tbl, net); if (!iter->tbl) return NULL; ipv6_route_seq_setup_walk(iter, net); goto iter_table; } static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos) 
__acquires(RCU) { struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; rcu_read_lock(); iter->tbl = ipv6_route_seq_next_table(NULL, net); iter->skip = *pos; if (iter->tbl) { loff_t p = 0; ipv6_route_seq_setup_walk(iter, net); return ipv6_route_seq_next(seq, NULL, &p); } else { return NULL; } } static bool ipv6_route_iter_active(struct ipv6_route_iter *iter) { struct fib6_walker *w = &iter->w; return w->node && !(w->state == FWS_U && w->node == w->root); } static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; if (ipv6_route_iter_active(iter)) fib6_walker_unlink(net, &iter->w); rcu_read_unlock(); } #if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL) static int ipv6_route_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, void *v) { struct bpf_iter__ipv6_route ctx; ctx.meta = meta; ctx.rt = v; return bpf_iter_run_prog(prog, &ctx); } static int ipv6_route_seq_show(struct seq_file *seq, void *v) { struct ipv6_route_iter *iter = seq->private; struct bpf_iter_meta meta; struct bpf_prog *prog; int ret; meta.seq = seq; prog = bpf_iter_get_info(&meta, false); if (!prog) return ipv6_route_native_seq_show(seq, v); ret = ipv6_route_prog_seq_show(prog, &meta, v); iter->w.leaf = NULL; return ret; } static void ipv6_route_seq_stop(struct seq_file *seq, void *v) { struct bpf_iter_meta meta; struct bpf_prog *prog; if (!v) { meta.seq = seq; prog = bpf_iter_get_info(&meta, true); if (prog) (void)ipv6_route_prog_seq_show(prog, &meta, v); } ipv6_route_native_seq_stop(seq, v); } #else static int ipv6_route_seq_show(struct seq_file *seq, void *v) { return ipv6_route_native_seq_show(seq, v); } static void ipv6_route_seq_stop(struct seq_file *seq, void *v) { ipv6_route_native_seq_stop(seq, v); } #endif const struct seq_operations ipv6_route_seq_ops = { .start = ipv6_route_seq_start, .next = ipv6_route_seq_next, .stop = ipv6_route_seq_stop, .show = ipv6_route_seq_show }; #endif /* CONFIG_PROC_FS */
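The lookup code above descends the fib6 radix tree one destination-address bit at a time (addr_bit_set() selects the right or left child) and then backtracks to the longest prefix that actually carries route information. As a rough illustration only, here is a minimal userspace sketch of that descent step under simplified assumptions (no RCU, no source-address subtrees); the toy_* names are hypothetical stand-ins, not kernel APIs.

/*
 * Illustrative-only model of the first for(;;) loop in fib6_node_lookup_1():
 * each node tests a single bit of the destination address, where bit 0 is
 * the most significant bit of the first address byte, matching the
 * convention of the kernel's addr_bit_set().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_node {
	int bit;                 /* which address bit this node tests */
	struct toy_node *left;   /* next node when the bit is clear */
	struct toy_node *right;  /* next node when the bit is set */
};

static bool toy_addr_bit_set(const uint8_t addr[16], int bit)
{
	return (addr[bit >> 3] >> (7 - (bit & 7))) & 1;
}

/* Walk down until there is no child on the chosen side. */
static struct toy_node *toy_descend(struct toy_node *root,
				    const uint8_t addr[16])
{
	struct toy_node *fn = root;

	for (;;) {
		struct toy_node *next = toy_addr_bit_set(addr, fn->bit) ?
					fn->right : fn->left;
		if (!next)
			return fn;
		fn = next;
	}
}

int main(void)
{
	/* 2001:db8::1 — first byte is 0x20, so bit 2 is set. */
	const uint8_t addr[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 };
	struct toy_node leaf = { .bit = 3 };
	struct toy_node root = { .bit = 2, .right = &leaf };

	printf("stopped at node testing bit %d\n",
	       toy_descend(&root, addr)->bit);
	return 0;
}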
#ifndef __DRM_GEM_H__ #define __DRM_GEM_H__ /* * GEM Graphics Execution Manager Driver Interfaces * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright (c) 2009-2010, Code Aurora Forum. * All rights reserved. * Copyright © 2014 Intel Corporation * Daniel Vetter <daniel.vetter@ffwll.ch> * * Author: Rickard E.
(Rik) Faith <faith@valinux.com> * Author: Gareth Hughes <gareth@valinux.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/kref.h> #include <linux/dma-buf.h> #include <linux/dma-resv.h> #include <linux/list.h> #include <linux/mutex.h> #ifdef CONFIG_TRANSPARENT_HUGEPAGE #include <drm/drm_device.h> #endif #include <drm/drm_vma_manager.h> struct iosys_map; struct drm_gem_object; /** * enum drm_gem_object_status - bitmask of object state for fdinfo reporting * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (ie. not unpinned) * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission * * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE * and be active or not resident, in which case drm_show_fdinfo() will not * account for it as purgeable. So drivers do not need to check if the buffer * is idle and resident to return this bit, i.e. userspace can mark a buffer as * purgeable even while it is still busy on the GPU. It will not get reported in * the puregeable stats until it becomes idle. The status gem object func does * not need to consider this. */ enum drm_gem_object_status { DRM_GEM_OBJECT_RESIDENT = BIT(0), DRM_GEM_OBJECT_PURGEABLE = BIT(1), DRM_GEM_OBJECT_ACTIVE = BIT(2), }; /** * struct drm_gem_object_funcs - GEM object functions */ struct drm_gem_object_funcs { /** * @free: * * Deconstructor for drm_gem_objects. * * This callback is mandatory. */ void (*free)(struct drm_gem_object *obj); /** * @open: * * Called upon GEM handle creation. * * This callback is optional. */ int (*open)(struct drm_gem_object *obj, struct drm_file *file); /** * @close: * * Called upon GEM handle release. * * This callback is optional. */ void (*close)(struct drm_gem_object *obj, struct drm_file *file); /** * @print_info: * * If driver subclasses struct &drm_gem_object, it can implement this * optional hook for printing additional driver specific info. * * drm_printf_indent() should be used in the callback passing it the * indent argument. * * This callback is called from drm_gem_print_info(). * * This callback is optional. */ void (*print_info)(struct drm_printer *p, unsigned int indent, const struct drm_gem_object *obj); /** * @export: * * Export backing buffer as a &dma_buf. * If this is not set drm_gem_prime_export() is used. * * This callback is optional. 
*/ struct dma_buf *(*export)(struct drm_gem_object *obj, int flags); /** * @pin: * * Pin backing buffer in memory, such that dma-buf importers can * access it. Used by the drm_gem_map_attach() helper. * * This callback is optional. */ int (*pin)(struct drm_gem_object *obj); /** * @unpin: * * Unpin backing buffer. Used by the drm_gem_map_detach() helper. * * This callback is optional. */ void (*unpin)(struct drm_gem_object *obj); /** * @get_sg_table: * * Returns a Scatter-Gather table representation of the buffer. * Used when exporting a buffer by the drm_gem_map_dma_buf() helper. * Releasing is done by calling dma_unmap_sg_attrs() and sg_free_table() * in drm_gem_unmap_buf(), therefore these helpers and this callback * here cannot be used for sg tables pointing at driver private memory * ranges. * * See also drm_prime_pages_to_sg(). */ struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); /** * @vmap: * * Returns a virtual address for the buffer. Used by the * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation * lock. * * This callback is optional. */ int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map); /** * @vunmap: * * Releases the address previously returned by @vmap. Used by the * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation * lock. * * This callback is optional. */ void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map); /** * @mmap: * * Handle mmap() of the gem object, setup vma accordingly. * * This callback is optional. * * The callback is used by both drm_gem_mmap_obj() and * drm_gem_prime_mmap(). When @mmap is present @vm_ops is not * used, the @mmap callback must set vma->vm_ops instead. */ int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma); /** * @evict: * * Evicts gem object out from memory. Used by the drm_gem_object_evict() * helper. Returns 0 on success, -errno otherwise. Called with a held * GEM reservation lock. * * This callback is optional. */ int (*evict)(struct drm_gem_object *obj); /** * @status: * * The optional status callback can return additional object state * which determines which stats the object is counted against. The * callback is called under table_lock. Racing against object status * change is "harmless", and the callback can expect to not race * against object destruction. * * Called by drm_show_memory_stats(). */ enum drm_gem_object_status (*status)(struct drm_gem_object *obj); /** * @rss: * * Return resident size of the object in physical memory. * * Called by drm_show_memory_stats(). */ size_t (*rss)(struct drm_gem_object *obj); /** * @vm_ops: * * Virtual memory operations used with mmap. * * This is optional but necessary for mmap support. */ const struct vm_operations_struct *vm_ops; }; /** * struct drm_gem_lru - A simple LRU helper * * A helper for tracking GEM objects in a given state, to aid in * driver's shrinker implementation. Tracks the count of pages * for lockless &shrinker.count_objects, and provides * &drm_gem_lru_scan for driver's &shrinker.scan_objects * implementation. */ struct drm_gem_lru { /** * @lock: * * Lock protecting movement of GEM objects between LRUs. All * LRUs that the object can move between should be protected * by the same lock. */ struct mutex *lock; /** * @count: * * The total number of backing pages of the GEM objects in * this LRU. */ long count; /** * @list: * * The LRU list. 
*/ struct list_head list; }; /** * struct drm_gem_object - GEM buffer object * * This structure defines the generic parts for GEM buffer objects, which are * mostly around handling mmap and userspace handles. * * Buffer objects are often abbreviated to BO. */ struct drm_gem_object { /** * @refcount: * * Reference count of this object * * Please use drm_gem_object_get() to acquire and drm_gem_object_put_locked() * or drm_gem_object_put() to release a reference to a GEM * buffer object. */ struct kref refcount; /** * @handle_count: * * This is the GEM file_priv handle count of this object. * * Each handle also holds a reference. Note that when the handle_count * drops to 0 any global names (e.g. the id in the flink namespace) will * be cleared. * * Protected by &drm_device.object_name_lock. */ unsigned handle_count; /** * @dev: DRM dev this object belongs to. */ struct drm_device *dev; /** * @filp: * * SHMEM file node used as backing storage for swappable buffer objects. * GEM also supports driver private objects with driver-specific backing * storage (contiguous DMA memory, special reserved blocks). In this * case @filp is NULL. */ struct file *filp; /** * @vma_node: * * Mapping info for this object to support mmap. Drivers are supposed to * allocate the mmap offset using drm_gem_create_mmap_offset(). The * offset itself can be retrieved using drm_vma_node_offset_addr(). * * Memory mapping itself is handled by drm_gem_mmap(), which also checks * that userspace is allowed to access the object. */ struct drm_vma_offset_node vma_node; /** * @size: * * Size of the object, in bytes. Immutable over the object's * lifetime. */ size_t size; /** * @name: * * Global name for this object, starts at 1. 0 means unnamed. * Access is covered by &drm_device.object_name_lock. This is used by * the GEM_FLINK and GEM_OPEN ioctls. */ int name; /** * @dma_buf: * * dma-buf associated with this GEM object. * * Pointer to the dma-buf associated with this gem object (either * through importing or exporting). We break the resulting reference * loop when the last gem handle for this object is released. * * Protected by &drm_device.object_name_lock. */ struct dma_buf *dma_buf; /** * @import_attach: * * dma-buf attachment backing this object. * * Any foreign dma_buf imported as a gem object has this set to the * attachment point for the device. This is invariant over the lifetime * of a gem object. * * The &drm_gem_object_funcs.free callback is responsible for * cleaning up the dma_buf attachment and references acquired at import * time. * * Note that the drm gem/prime core does not depend upon drivers setting * this field any more. So for drivers where this doesn't make sense * (e.g. virtual devices or a displaylink behind an usb bus) they can * simply leave it as NULL. */ struct dma_buf_attachment *import_attach; /** * @resv: * * Pointer to reservation object associated with the this GEM object. * * Normally (@resv == &@_resv) except for imported GEM objects. */ struct dma_resv *resv; /** * @_resv: * * A reservation object for this GEM object. * * This is unused for imported GEM objects. */ struct dma_resv _resv; /** * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object. * * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the * mutex. Otherwise, the list is protected by the GEMs &dma_resv lock. * * Note that all entries in this list must agree on whether * DRM_GPUVM_IMMEDIATE_MODE is set. */ struct { /** * @gpuva.list: list of GPUVM mappings attached to this GEM object. 
* * Drivers should lock list accesses with either the GEMs * &dma_resv lock (&drm_gem_object.resv) or the * &drm_gem_object.gpuva.lock mutex. */ struct list_head list; /** * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list * when DRM_GPUVM_IMMEDIATE_MODE is used. * * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be * safe to take this mutex during the fence signalling path, so * do not allocate memory while holding this lock. Otherwise, * the &dma_resv lock should be used. */ struct mutex lock; } gpuva; /** * @funcs: * * Optional GEM object functions. If this is set, it will be used instead of the * corresponding &drm_driver GEM callbacks. * * New drivers should use this. * */ const struct drm_gem_object_funcs *funcs; /** * @lru_node: * * List node in a &drm_gem_lru. */ struct list_head lru_node; /** * @lru: * * The current LRU list that the GEM object is on. */ struct drm_gem_lru *lru; }; /** * DRM_GEM_FOPS - Default drm GEM file operations * * This macro provides a shorthand for setting the GEM file ops in the * &file_operations structure. If all you need are the default ops, use * DEFINE_DRM_GEM_FOPS instead. */ #define DRM_GEM_FOPS \ .open = drm_open,\ .release = drm_release,\ .unlocked_ioctl = drm_ioctl,\ .compat_ioctl = drm_compat_ioctl,\ .poll = drm_poll,\ .read = drm_read,\ .llseek = noop_llseek,\ .get_unmapped_area = drm_gem_get_unmapped_area,\ .mmap = drm_gem_mmap, \ .fop_flags = FOP_UNSIGNED_OFFSET /** * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers * @name: name for the generated structure * * This macro autogenerates a suitable &struct file_operations for GEM based * drivers, which can be assigned to &drm_driver.fops. Note that this structure * cannot be shared between drivers, because it contains a reference to the * current module using THIS_MODULE. * * Note that the declaration is already marked as static - if you need a * non-static version of this you're probably doing it wrong and will break the * THIS_MODULE reference by accident. */ #define DEFINE_DRM_GEM_FOPS(name) \ static const struct file_operations name = {\ .owner = THIS_MODULE,\ DRM_GEM_FOPS,\ } #ifdef CONFIG_TRANSPARENT_HUGEPAGE int drm_gem_huge_mnt_create(struct drm_device *dev, const char *value); #else static inline int drm_gem_huge_mnt_create(struct drm_device *dev, const char *value) { return 0; } #endif /** * drm_gem_get_huge_mnt - Get the huge tmpfs mountpoint used by a DRM device * @dev: DRM device * * This function gets the huge tmpfs mountpoint used by DRM device @dev. A huge * tmpfs mountpoint is used instead of `shm_mnt` after a successful call to * drm_gem_huge_mnt_create() when CONFIG_TRANSPARENT_HUGEPAGE is enabled. * * Returns: * The huge tmpfs mountpoint in use, NULL otherwise. 
*/ static inline struct vfsmount *drm_gem_get_huge_mnt(struct drm_device *dev) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE return dev->huge_mnt; #else return NULL; #endif } void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct kref *kref); int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); void drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); void drm_gem_private_object_fini(struct drm_gem_object *obj); void drm_gem_vm_open(struct vm_area_struct *vma); void drm_gem_vm_close(struct vm_area_struct *vma); int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, struct vm_area_struct *vma); int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); #ifdef CONFIG_MMU unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr, unsigned long len, unsigned long pgoff, unsigned long flags); #else #define drm_gem_get_unmapped_area NULL #endif /** * drm_gem_object_get - acquire a GEM buffer object reference * @obj: GEM buffer object * * This function acquires an additional reference to @obj. It is illegal to * call this without already holding a reference. No locks required. */ static inline void drm_gem_object_get(struct drm_gem_object *obj) { kref_get(&obj->refcount); } __attribute__((nonnull)) static inline void __drm_gem_object_put(struct drm_gem_object *obj) { kref_put(&obj->refcount, drm_gem_object_free); } /** * drm_gem_object_put - drop a GEM buffer object reference * @obj: GEM buffer object * * This releases a reference to @obj. */ static inline void drm_gem_object_put(struct drm_gem_object *obj) { if (obj) __drm_gem_object_put(obj); } int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); int drm_gem_handle_delete(struct drm_file *filp, u32 handle); void drm_gem_free_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); struct page **drm_gem_get_pages(struct drm_gem_object *obj); void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, bool dirty, bool accessed); void drm_gem_lock(struct drm_gem_object *obj); void drm_gem_unlock(struct drm_gem_object *obj); int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out); struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle); long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout); int drm_gem_lock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx); void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, struct ww_acquire_ctx *acquire_ctx); int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, u32 handle, u64 *offset); void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock); void drm_gem_lru_remove(struct drm_gem_object *obj); void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj); void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj); unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned int nr_to_scan, unsigned long *remaining, bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx 
*ticket), struct ww_acquire_ctx *ticket); int drm_gem_evict_locked(struct drm_gem_object *obj); /** * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats * * This helper should only be used for fdinfo shared memory stats to determine * if a GEM object is shared. * * @obj: obj in question */ static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj) { return (obj->handle_count > 1) || obj->dma_buf; } /** * drm_gem_is_imported() - Tests if GEM object's buffer has been imported * @obj: the GEM object * * Returns: * True if the GEM object's buffer has been imported, false otherwise */ static inline bool drm_gem_is_imported(const struct drm_gem_object *obj) { return !!obj->import_attach; } #ifdef CONFIG_LOCKDEP #define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \ lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \ lockdep_is_held(&(obj)->gpuva.lock) : \ dma_resv_held((obj)->resv)) #else #define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0) #endif /** * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object * @obj: the &drm_gem_object * * This initializes the &drm_gem_object's &drm_gpuvm_bo list. * * Calling this function is only necessary for drivers intending to support the * &drm_driver_feature DRIVER_GEM_GPUVA. * * See also drm_gem_gpuva_set_lock(). */ static inline void drm_gem_gpuva_init(struct drm_gem_object *obj) { INIT_LIST_HEAD(&obj->gpuva.list); } /** * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with * * This iterator walks over all &drm_gpuvm_bo structures associated with the * &drm_gem_object. */ #define drm_gem_for_each_gpuvm_bo(entry__, obj__) \ list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem) /** * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of * &drm_gpuvm_bo * @entry__: &drm_gpuvm_bostructure to assign to in each iteration step * @next__: &next &drm_gpuvm_bo to store the next step * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with * * This iterator walks over all &drm_gpuvm_bo structures associated with the * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence * it is save against removal of elements. */ #define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem) #endif /* __DRM_GEM_H__ */
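drm_gem_object_get()/drm_gem_object_put() above are thin wrappers around a kref: the creator holds the first reference, every handle or lookup takes another, and drm_gem_object_free() runs when the count reaches zero. The sketch below models that lifetime rule in plain userspace C; the toy_* names are made up, and the plain integer counter stands in for the atomic kref used by the real header.

#include <stdio.h>
#include <stdlib.h>

struct toy_gem_object {
	unsigned int refcount;                    /* stands in for struct kref */
	void (*free)(struct toy_gem_object *obj); /* cf. drm_gem_object_funcs.free */
};

static void toy_gem_object_get(struct toy_gem_object *obj)
{
	obj->refcount++;          /* kref_get(&obj->refcount) in the kernel */
}

static void toy_gem_object_put(struct toy_gem_object *obj)
{
	if (obj && --obj->refcount == 0)
		obj->free(obj);   /* kref_put() invokes drm_gem_object_free() */
}

static void toy_free(struct toy_gem_object *obj)
{
	printf("last reference dropped, freeing object\n");
	free(obj);
}

int main(void)
{
	struct toy_gem_object *obj = malloc(sizeof(*obj));

	if (!obj)
		return 1;
	obj->refcount = 1;        /* creation holds the first reference */
	obj->free = toy_free;

	toy_gem_object_get(obj);  /* e.g. a userspace handle is created */
	toy_gem_object_put(obj);  /* handle released */
	toy_gem_object_put(obj);  /* creator's reference dropped: object freed */
	return 0;
}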
31 31 31 31 2 31 4 1 3 30 31 173 88 88 49 40 40 173 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 // SPDX-License-Identifier: GPL-2.0-only /* * V9FS cache definitions. * * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu> */ #include <linux/jiffies.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/sched.h> #include <linux/fs.h> #include <net/9p/9p.h> #include "v9fs.h" #include "cache.h" int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses, const char *dev_name) { struct fscache_volume *vcookie; char *name, *p; name = kasprintf(GFP_KERNEL, "9p,%s,%s", dev_name, v9ses->cachetag ?: v9ses->aname); if (!name) return -ENOMEM; for (p = name; *p; p++) if (*p == '/') *p = ';'; vcookie = fscache_acquire_volume(name, NULL, NULL, 0); p9_debug(P9_DEBUG_FSC, "session %p get volume %p (%s)\n", v9ses, vcookie, name); if (IS_ERR(vcookie)) { if (vcookie != ERR_PTR(-EBUSY)) { kfree(name); return PTR_ERR(vcookie); } pr_err("Cache volume key already in use (%s)\n", name); vcookie = NULL; } v9ses->fscache = vcookie; kfree(name); return 0; } void v9fs_cache_inode_get_cookie(struct inode *inode) { struct v9fs_inode *v9inode = V9FS_I(inode); struct v9fs_session_info *v9ses; __le32 version; __le64 path; if (!S_ISREG(inode->i_mode)) return; if (WARN_ON(v9fs_inode_cookie(v9inode))) return; version = cpu_to_le32(v9inode->qid.version); path = cpu_to_le64(v9inode->qid.path); v9ses = v9fs_inode2v9ses(inode); v9inode->netfs.cache = fscache_acquire_cookie(v9fs_session_cache(v9ses), 0, &path, sizeof(path), &version, sizeof(version), i_size_read(&v9inode->netfs.inode)); if (v9inode->netfs.cache) mapping_set_release_always(inode->i_mapping); p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n", inode, v9fs_inode_cookie(v9inode)); }
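The volume key built in v9fs_cache_session_get_cookie() is "9p,<dev_name>,<cachetag or aname>" with every '/' rewritten to ';' before it is handed to fscache_acquire_volume(). A standalone userspace sketch of that key construction is shown below; it re-implements the string handling for illustration only and is not the in-kernel helper.

/* Illustrative userspace sketch of the "9p,dev,tag" key sanitization. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *v9fs_volume_key(const char *dev_name, const char *tag)
{
	size_t len = strlen("9p,") + strlen(dev_name) + 1 + strlen(tag) + 1;
	char *name = malloc(len), *p;

	if (!name)
		return NULL;
	snprintf(name, len, "9p,%s,%s", dev_name, tag);
	for (p = name; *p; p++)
		if (*p == '/')
			*p = ';';	/* fscache volume keys may not contain '/' */
	return name;
}

int main(void)
{
	char *key = v9fs_volume_key("192.168.0.1:/export", "/");

	if (key) {
		printf("%s\n", key);	/* prints "9p,192.168.0.1:;export,;" */
		free(key);
	}
	return 0;
}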
4 4 4 4 4 4 4 4 4 4 4 4 4 3 1 5 5 5 4 1 1 4 4 4 3 3 3 1 2 2 2 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 // SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/sch_skbprio.c SKB Priority Queue. * * Authors: Nishanth Devarajan, <ndev2021@gmail.com> * Cody Doucette, <doucette@bu.edu> * original idea by Michel Machado, Cody Doucette, and Qiaobin Fu */ #include <linux/string.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> #include <net/sch_generic.h> #include <net/inet_ecn.h> /* SKB Priority Queue * ================================= * * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes * packets according to their skb->priority field. Under congestion, * Skbprio drops already-enqueued lower priority packets to make space * available for higher priority packets; it was conceived as a solution * for denial-of-service defenses that need to route packets with different * priorities as a mean to overcome DoS attacks. */ struct skbprio_sched_data { /* Queue state. */ struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY]; struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY]; u16 highest_prio; u16 lowest_prio; }; static u16 calc_new_high_prio(const struct skbprio_sched_data *q) { int prio; for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { if (!skb_queue_empty(&q->qdiscs[prio])) return prio; } /* SKB queue is empty, return 0 (default highest priority setting). */ return 0; } static u16 calc_new_low_prio(const struct skbprio_sched_data *q) { int prio; for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) { if (!skb_queue_empty(&q->qdiscs[prio])) return prio; } /* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1 * (default lowest priority setting). */ return SKBPRIO_MAX_PRIORITY - 1; } static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1; struct skbprio_sched_data *q = qdisc_priv(sch); struct sk_buff_head *qdisc; struct sk_buff_head *lp_qdisc; struct sk_buff *to_drop; u16 prio, lp; /* Obtain the priority of @skb. 
*/ prio = min(skb->priority, max_priority); qdisc = &q->qdiscs[prio]; /* sch->limit can change under us from skbprio_change() */ if (sch->q.qlen < READ_ONCE(sch->limit)) { __skb_queue_tail(qdisc, skb); qdisc_qstats_backlog_inc(sch, skb); q->qstats[prio].backlog += qdisc_pkt_len(skb); /* Check to update highest and lowest priorities. */ if (prio > q->highest_prio) q->highest_prio = prio; if (prio < q->lowest_prio) q->lowest_prio = prio; sch->q.qlen++; return NET_XMIT_SUCCESS; } /* If this packet has the lowest priority, drop it. */ lp = q->lowest_prio; if (prio <= lp) { q->qstats[prio].drops++; q->qstats[prio].overlimits++; return qdisc_drop(skb, sch, to_free); } __skb_queue_tail(qdisc, skb); qdisc_qstats_backlog_inc(sch, skb); q->qstats[prio].backlog += qdisc_pkt_len(skb); /* Drop the packet at the tail of the lowest priority qdisc. */ lp_qdisc = &q->qdiscs[lp]; to_drop = __skb_dequeue_tail(lp_qdisc); BUG_ON(!to_drop); qdisc_qstats_backlog_dec(sch, to_drop); qdisc_drop(to_drop, sch, to_free); q->qstats[lp].backlog -= qdisc_pkt_len(to_drop); q->qstats[lp].drops++; q->qstats[lp].overlimits++; /* Check to update highest and lowest priorities. */ if (skb_queue_empty(lp_qdisc)) { if (q->lowest_prio == q->highest_prio) { q->lowest_prio = prio; q->highest_prio = prio; } else { q->lowest_prio = calc_new_low_prio(q); } } if (prio > q->highest_prio) q->highest_prio = prio; return NET_XMIT_CN; } static struct sk_buff *skbprio_dequeue(struct Qdisc *sch) { struct skbprio_sched_data *q = qdisc_priv(sch); struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio]; struct sk_buff *skb = __skb_dequeue(hpq); if (unlikely(!skb)) return NULL; sch->q.qlen--; qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb); /* Update highest priority field. */ if (skb_queue_empty(hpq)) { if (q->lowest_prio == q->highest_prio) { q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; } else { q->highest_prio = calc_new_high_prio(q); } } return skb; } static int skbprio_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct tc_skbprio_qopt *ctl = nla_data(opt); if (opt->nla_len != nla_attr_size(sizeof(*ctl))) return -EINVAL; WRITE_ONCE(sch->limit, ctl->limit); return 0; } static int skbprio_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct skbprio_sched_data *q = qdisc_priv(sch); int prio; /* Initialise all queues, one for each possible priority. 
*/ for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) __skb_queue_head_init(&q->qdiscs[prio]); memset(&q->qstats, 0, sizeof(q->qstats)); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; sch->limit = 64; if (!opt) return 0; return skbprio_change(sch, opt, extack); } static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tc_skbprio_qopt opt; opt.limit = READ_ONCE(sch->limit); if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) return -1; return skb->len; } static void skbprio_reset(struct Qdisc *sch) { struct skbprio_sched_data *q = qdisc_priv(sch); int prio; for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) __skb_queue_purge(&q->qdiscs[prio]); memset(&q->qstats, 0, sizeof(q->qstats)); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; } static void skbprio_destroy(struct Qdisc *sch) { struct skbprio_sched_data *q = qdisc_priv(sch); int prio; for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) __skb_queue_purge(&q->qdiscs[prio]); } static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long skbprio_find(struct Qdisc *sch, u32 classid) { return 0; } static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { tcm->tcm_handle |= TC_H_MIN(cl); return 0; } static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct gnet_dump *d) { struct skbprio_sched_data *q = qdisc_priv(sch); if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1], q->qstats[cl - 1].qlen) < 0) return -1; return 0; } static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) { unsigned int i; if (arg->stop) return; for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) { if (!tc_qdisc_stats_dump(sch, i + 1, arg)) break; } } static const struct Qdisc_class_ops skbprio_class_ops = { .leaf = skbprio_leaf, .find = skbprio_find, .dump = skbprio_dump_class, .dump_stats = skbprio_dump_class_stats, .walk = skbprio_walk, }; static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = { .cl_ops = &skbprio_class_ops, .id = "skbprio", .priv_size = sizeof(struct skbprio_sched_data), .enqueue = skbprio_enqueue, .dequeue = skbprio_dequeue, .peek = qdisc_peek_dequeued, .init = skbprio_init, .reset = skbprio_reset, .change = skbprio_change, .dump = skbprio_dump, .destroy = skbprio_destroy, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("skbprio"); static int __init skbprio_module_init(void) { return register_qdisc(&skbprio_qdisc_ops); } static void __exit skbprio_module_exit(void) { unregister_qdisc(&skbprio_qdisc_ops); } module_init(skbprio_module_init) module_exit(skbprio_module_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SKB priority based scheduling qdisc");
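Skbprio classifies purely on skb->priority, which for locally generated traffic usually comes from the sending socket's SO_PRIORITY option (it can also be set by tc filters or cgroup net_prio). The short userspace sketch below marks a socket so its packets land in a higher skbprio band; the value 4 is arbitrary, and the enqueue path above clamps anything at or above SKBPRIO_MAX_PRIORITY into the top band.

/* Userspace sketch: raise skb->priority for one socket's traffic. */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int prio = 4;	/* values above 6 require CAP_NET_ADMIN */

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_PRIORITY,
				 &prio, sizeof(prio)) < 0) {
		perror("SO_PRIORITY");
		return 1;
	}
	/* Packets sent on fd now carry skb->priority == 4. */
	close(fd);
	return 0;
}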
// SPDX-License-Identifier: GPL-2.0-or-later /* Decoder for ASN.1 BER/DER/CER encoded bytestream * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
// SPDX-License-Identifier: GPL-2.0-or-later /* Decoder for ASN.1 BER/DER/CER encoded bytestream * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/asn1_decoder.h> #include <linux/asn1_ber_bytecode.h> static const unsigned char asn1_op_lengths[ASN1_OP__NR] = { /* OPC TAG JMP ACT */ [ASN1_OP_MATCH] = 1 + 1, [ASN1_OP_MATCH_OR_SKIP] = 1 + 1, [ASN1_OP_MATCH_ACT] = 1 + 1 + 1, [ASN1_OP_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_MATCH_JUMP] = 1 + 1 + 1, [ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_MATCH_ANY] = 1, [ASN1_OP_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_MATCH_ANY_ACT] = 1 + 1, [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_ANY] = 1, [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1, [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_FAIL] = 1, [ASN1_OP_COMPLETE] = 1, [ASN1_OP_ACT] = 1 + 1, [ASN1_OP_MAYBE_ACT] = 1 + 1, [ASN1_OP_RETURN] = 1, [ASN1_OP_END_SEQ] = 1, [ASN1_OP_END_SEQ_OF] = 1 + 1, [ASN1_OP_END_SET] = 1, [ASN1_OP_END_SET_OF] = 1 + 1, [ASN1_OP_END_SEQ_ACT] = 1 + 1, [ASN1_OP_END_SEQ_OF_ACT] = 1 + 1 + 1, [ASN1_OP_END_SET_ACT] = 1 + 1, [ASN1_OP_END_SET_OF_ACT] = 1 + 1 + 1, }; /* * Find the length of an indefinite length object * @data: The data buffer * @datalen: The end of the innermost containing element in the buffer * @_dp: The data parse cursor (updated before returning) * @_len: Where to return the size of the element. * @_errmsg: Where to return a pointer to an error message on error */ static int asn1_find_indefinite_length(const unsigned char *data, size_t datalen, size_t *_dp, size_t *_len, const char **_errmsg) { unsigned char tag, tmp; size_t dp = *_dp, len, n; int indef_level = 1; next_tag: if (unlikely(datalen - dp < 2)) { if (datalen == dp) goto missing_eoc; goto data_overrun_error; } /* Extract a tag from the data */ tag = data[dp++]; if (tag == ASN1_EOC) { /* It appears to be an EOC. 
*/ if (data[dp++] != 0) goto invalid_eoc; if (--indef_level <= 0) { *_len = dp - *_dp; *_dp = dp; return 0; } goto next_tag; } if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) { do { if (unlikely(datalen - dp < 2)) goto data_overrun_error; tmp = data[dp++]; } while (tmp & 0x80); } /* Extract the length */ len = data[dp++]; if (len <= 0x7f) goto check_length; if (unlikely(len == ASN1_INDEFINITE_LENGTH)) { /* Indefinite length */ if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5)) goto indefinite_len_primitive; indef_level++; goto next_tag; } n = len - 0x80; if (unlikely(n > sizeof(len) - 1)) goto length_too_long; if (unlikely(n > datalen - dp)) goto data_overrun_error; len = 0; for (; n > 0; n--) { len <<= 8; len |= data[dp++]; } check_length: if (len > datalen - dp) goto data_overrun_error; dp += len; goto next_tag; length_too_long: *_errmsg = "Unsupported length"; goto error; indefinite_len_primitive: *_errmsg = "Indefinite len primitive not permitted"; goto error; invalid_eoc: *_errmsg = "Invalid length EOC"; goto error; data_overrun_error: *_errmsg = "Data overrun error"; goto error; missing_eoc: *_errmsg = "Missing EOC in indefinite len cons"; error: *_dp = dp; return -1; } /** * asn1_ber_decoder - Decoder BER/DER/CER ASN.1 according to pattern * @decoder: The decoder definition (produced by asn1_compiler) * @context: The caller's context (to be passed to the action functions) * @data: The encoded data * @datalen: The size of the encoded data * * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern * produced by asn1_compiler. Action functions are called on marked tags to * allow the caller to retrieve significant data. * * LIMITATIONS: * * To keep down the amount of stack used by this function, the following limits * have been imposed: * * (1) This won't handle datalen > 65535 without increasing the size of the * cons stack elements and length_too_long checking. * * (2) The stack of constructed types is 10 deep. If the depth of non-leaf * constructed types exceeds this, the decode will fail. * * (3) The SET type (not the SET OF type) isn't really supported as tracking * what members of the set have been seen is a pain. */ int asn1_ber_decoder(const struct asn1_decoder *decoder, void *context, const unsigned char *data, size_t datalen) { const unsigned char *machine = decoder->machine; const asn1_action_t *actions = decoder->actions; size_t machlen = decoder->machlen; enum asn1_opcode op; unsigned char tag = 0, csp = 0, jsp = 0, optag = 0, hdr = 0; const char *errmsg; size_t pc = 0, dp = 0, tdp = 0, len = 0; int ret; unsigned char flags = 0; #define FLAG_INDEFINITE_LENGTH 0x01 #define FLAG_MATCHED 0x02 #define FLAG_LAST_MATCHED 0x04 /* Last tag matched */ #define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag * - ie. whether or not we are going to parse * a compound type. */ #define NR_CONS_STACK 10 unsigned short cons_dp_stack[NR_CONS_STACK]; unsigned short cons_datalen_stack[NR_CONS_STACK]; unsigned char cons_hdrlen_stack[NR_CONS_STACK]; #define NR_JUMP_STACK 10 unsigned char jump_stack[NR_JUMP_STACK]; if (datalen > 65535) return -EMSGSIZE; next_op: pr_debug("next_op: pc=\e[32m%zu\e[m/%zu dp=\e[33m%zu\e[m/%zu C=%d J=%d\n", pc, machlen, dp, datalen, csp, jsp); if (unlikely(pc >= machlen)) goto machine_overrun_error; op = machine[pc]; if (unlikely(pc + asn1_op_lengths[op] > machlen)) goto machine_overrun_error; /* If this command is meant to match a tag, then do that before * evaluating the command. 
*/ if (op <= ASN1_OP__MATCHES_TAG) { unsigned char tmp; /* Skip conditional matches if possible */ if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) || (op & ASN1_OP_MATCH__SKIP && dp == datalen)) { flags &= ~FLAG_LAST_MATCHED; pc += asn1_op_lengths[op]; goto next_op; } flags = 0; hdr = 2; /* Extract a tag from the data */ if (unlikely(datalen - dp < 2)) goto data_overrun_error; tag = data[dp++]; if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) goto long_tag_not_supported; if (op & ASN1_OP_MATCH__ANY) { pr_debug("- any %02x\n", tag); } else { /* Extract the tag from the machine * - Either CONS or PRIM are permitted in the data if * CONS is not set in the op stream, otherwise CONS * is mandatory. */ optag = machine[pc + 1]; flags |= optag & FLAG_CONS; /* Determine whether the tag matched */ tmp = optag ^ tag; tmp &= ~(optag & ASN1_CONS_BIT); pr_debug("- match? %02x %02x %02x\n", tag, optag, tmp); if (tmp != 0) { /* All odd-numbered tags are MATCH_OR_SKIP. */ if (op & ASN1_OP_MATCH__SKIP) { pc += asn1_op_lengths[op]; dp--; goto next_op; } goto tag_mismatch; } } flags |= FLAG_MATCHED; len = data[dp++]; if (len > 0x7f) { if (unlikely(len == ASN1_INDEFINITE_LENGTH)) { /* Indefinite length */ if (unlikely(!(tag & ASN1_CONS_BIT))) goto indefinite_len_primitive; flags |= FLAG_INDEFINITE_LENGTH; if (unlikely(2 > datalen - dp)) goto data_overrun_error; } else { int n = len - 0x80; if (unlikely(n > 2)) goto length_too_long; if (unlikely(n > datalen - dp)) goto data_overrun_error; hdr += n; for (len = 0; n > 0; n--) { len <<= 8; len |= data[dp++]; } if (unlikely(len > datalen - dp)) goto data_overrun_error; } } else { if (unlikely(len > datalen - dp)) goto data_overrun_error; } if (flags & FLAG_CONS) { /* For expected compound forms, we stack the positions * of the start and end of the data. */ if (unlikely(csp >= NR_CONS_STACK)) goto cons_stack_overflow; cons_dp_stack[csp] = dp; cons_hdrlen_stack[csp] = hdr; if (!(flags & FLAG_INDEFINITE_LENGTH)) { cons_datalen_stack[csp] = datalen; datalen = dp + len; } else { cons_datalen_stack[csp] = 0; } csp++; } pr_debug("- TAG: %02x %zu%s\n", tag, len, flags & FLAG_CONS ? 
" CONS" : ""); tdp = dp; } /* Decide how to handle the operation */ switch (op) { case ASN1_OP_MATCH: case ASN1_OP_MATCH_OR_SKIP: case ASN1_OP_MATCH_ACT: case ASN1_OP_MATCH_ACT_OR_SKIP: case ASN1_OP_MATCH_ANY: case ASN1_OP_MATCH_ANY_OR_SKIP: case ASN1_OP_MATCH_ANY_ACT: case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_OR_SKIP: case ASN1_OP_COND_MATCH_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_ANY: case ASN1_OP_COND_MATCH_ANY_OR_SKIP: case ASN1_OP_COND_MATCH_ANY_ACT: case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: if (!(flags & FLAG_CONS)) { if (flags & FLAG_INDEFINITE_LENGTH) { size_t tmp = dp; ret = asn1_find_indefinite_length( data, datalen, &tmp, &len, &errmsg); if (ret < 0) goto error; } pr_debug("- LEAF: %zu\n", len); } if (op & ASN1_OP_MATCH__ACT) { unsigned char act; if (op & ASN1_OP_MATCH__ANY) act = machine[pc + 1]; else act = machine[pc + 2]; ret = actions[act](context, hdr, tag, data + dp, len); if (ret < 0) return ret; } if (!(flags & FLAG_CONS)) dp += len; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_MATCH_JUMP: case ASN1_OP_MATCH_JUMP_OR_SKIP: case ASN1_OP_COND_MATCH_JUMP_OR_SKIP: pr_debug("- MATCH_JUMP\n"); if (unlikely(jsp == NR_JUMP_STACK)) goto jump_stack_overflow; jump_stack[jsp++] = pc + asn1_op_lengths[op]; pc = machine[pc + 2]; goto next_op; case ASN1_OP_COND_FAIL: if (unlikely(!(flags & FLAG_MATCHED))) goto tag_mismatch; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_COMPLETE: if (unlikely(jsp != 0 || csp != 0)) { pr_err("ASN.1 decoder error: Stacks not empty at completion (%u, %u)\n", jsp, csp); return -EBADMSG; } return 0; case ASN1_OP_END_SET: case ASN1_OP_END_SET_ACT: if (unlikely(!(flags & FLAG_MATCHED))) goto tag_mismatch; fallthrough; case ASN1_OP_END_SEQ: case ASN1_OP_END_SET_OF: case ASN1_OP_END_SEQ_OF: case ASN1_OP_END_SEQ_ACT: case ASN1_OP_END_SET_OF_ACT: case ASN1_OP_END_SEQ_OF_ACT: if (unlikely(csp <= 0)) goto cons_stack_underflow; csp--; tdp = cons_dp_stack[csp]; hdr = cons_hdrlen_stack[csp]; len = datalen; datalen = cons_datalen_stack[csp]; pr_debug("- end cons t=%zu dp=%zu l=%zu/%zu\n", tdp, dp, len, datalen); if (datalen == 0) { /* Indefinite length - check for the EOC. 
*/ datalen = len; if (unlikely(datalen - dp < 2)) goto data_overrun_error; if (data[dp++] != 0) { if (op & ASN1_OP_END__OF) { dp--; csp++; pc = machine[pc + 1]; pr_debug("- continue\n"); goto next_op; } goto missing_eoc; } if (data[dp++] != 0) goto invalid_eoc; len = dp - tdp - 2; } else { if (dp < len && (op & ASN1_OP_END__OF)) { datalen = len; csp++; pc = machine[pc + 1]; pr_debug("- continue\n"); goto next_op; } if (dp != len) goto cons_length_error; len -= tdp; pr_debug("- cons len l=%zu d=%zu\n", len, dp - tdp); } if (op & ASN1_OP_END__ACT) { unsigned char act; if (op & ASN1_OP_END__OF) act = machine[pc + 2]; else act = machine[pc + 1]; ret = actions[act](context, hdr, 0, data + tdp, len); if (ret < 0) return ret; } pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_MAYBE_ACT: if (!(flags & FLAG_LAST_MATCHED)) { pc += asn1_op_lengths[op]; goto next_op; } fallthrough; case ASN1_OP_ACT: ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len); if (ret < 0) return ret; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_RETURN: if (unlikely(jsp <= 0)) goto jump_stack_underflow; pc = jump_stack[--jsp]; flags |= FLAG_MATCHED | FLAG_LAST_MATCHED; goto next_op; default: break; } /* Shouldn't reach here */ pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n", op, pc); return -EBADMSG; data_overrun_error: errmsg = "Data overrun error"; goto error; machine_overrun_error: errmsg = "Machine overrun error"; goto error; jump_stack_underflow: errmsg = "Jump stack underflow"; goto error; jump_stack_overflow: errmsg = "Jump stack overflow"; goto error; cons_stack_underflow: errmsg = "Cons stack underflow"; goto error; cons_stack_overflow: errmsg = "Cons stack overflow"; goto error; cons_length_error: errmsg = "Cons length error"; goto error; missing_eoc: errmsg = "Missing EOC in indefinite len cons"; goto error; invalid_eoc: errmsg = "Invalid length EOC"; goto error; length_too_long: errmsg = "Unsupported length"; goto error; indefinite_len_primitive: errmsg = "Indefinite len primitive not permitted"; goto error; tag_mismatch: errmsg = "Unexpected tag"; goto error; long_tag_not_supported: errmsg = "Long tag not supported"; error: pr_debug("\nASN1: %s [m=%zu d=%zu ot=%02x t=%02x l=%zu]\n", errmsg, pc, dp, optag, tag, len); return -EBADMSG; } EXPORT_SYMBOL_GPL(asn1_ber_decoder); MODULE_DESCRIPTION("Decoder for ASN.1 BER/DER/CER encoded bytestream"); MODULE_LICENSE("GPL");
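asn1_ber_decoder() is driven by a bytecode table emitted by the in-tree asn1_compiler from a .asn1 grammar, and the caller supplies action callbacks that receive the tag and value of each marked element. The sketch below shows only the calling convention; "my_decoder", "my_note_oid" and "my_parse_ctx" are hypothetical stand-ins for what asn1_compiler would generate (as is done for the in-tree pkcs7 and x509 parsers), not symbols defined in this file.

/* Sketch of the asn1_ber_decoder() calling convention (names hypothetical). */
#include <linux/asn1_decoder.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_parse_ctx {
	const void *oid;
	size_t oid_len;
};

/* Action callback, invoked for the grammar element it is attached to. */
static int my_note_oid(void *context, size_t hdrlen, unsigned char tag,
		       const void *value, size_t vlen)
{
	struct my_parse_ctx *ctx = context;

	ctx->oid = value;
	ctx->oid_len = vlen;
	return 0;		/* a negative return aborts the decode */
}

/* "my_decoder" would be generated by asn1_compiler from a .asn1 grammar. */
extern const struct asn1_decoder my_decoder;

static int my_parse(const unsigned char *data, size_t datalen,
		    struct my_parse_ctx *ctx)
{
	return asn1_ber_decoder(&my_decoder, ctx, data, datalen);
}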
1 65 1 1 1 1 1 1 1 108 47 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 // SPDX-License-Identifier: GPL-2.0-or-later /* * Bridge per vlan tunnel port dst_metadata handling code * * Authors: * Roopa Prabhu <roopa@cumulusnetworks.com> */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <net/switchdev.h> #include <net/dst_metadata.h> #include "br_private.h" #include "br_private_tunnel.h" static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg, const void *ptr) { const struct net_bridge_vlan *vle = ptr; __be64 tunid = *(__be64 *)arg->key; return vle->tinfo.tunnel_id != tunid; } static const struct rhashtable_params br_vlan_tunnel_rht_params = { .head_offset = offsetof(struct net_bridge_vlan, tnode), .key_offset = offsetof(struct net_bridge_vlan, tinfo.tunnel_id), .key_len = sizeof(__be64), .nelem_hint = 3, .obj_cmpfn = br_vlan_tunid_cmp, .automatic_shrinking = true, }; static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl, __be64 tunnel_id) { return rhashtable_lookup_fast(tbl, &tunnel_id, br_vlan_tunnel_rht_params); } static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan) { struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst); WRITE_ONCE(vlan->tinfo.tunnel_id, 0); RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL); dst_release(&tdst->dst); } void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg, struct net_bridge_vlan *vlan) { if (!rcu_access_pointer(vlan->tinfo.tunnel_dst)) return; rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode, br_vlan_tunnel_rht_params); vlan_tunnel_info_release(vlan); } static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg, struct net_bridge_vlan *vlan, u32 tun_id) { struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst); __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id)); IP_TUNNEL_DECLARE_FLAGS(flags) = { }; int err; if (metadata) return -EEXIST; __set_bit(IP_TUNNEL_KEY_BIT, flags); metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, flags, key, 0); if (!metadata) return -EINVAL; metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE; rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata); WRITE_ONCE(vlan->tinfo.tunnel_id, key); err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode, br_vlan_tunnel_rht_params); if (err) goto out; return 0; out: vlan_tunnel_info_release(vlan); return err; } /* Must be protected by RTNL. * Must be called with vid in range from 1 to 4094 inclusive. 
*/ int nbp_vlan_tunnel_info_add(const struct net_bridge_port *port, u16 vid, u32 tun_id) { struct net_bridge_vlan_group *vg; struct net_bridge_vlan *vlan; ASSERT_RTNL(); vg = nbp_vlan_group(port); vlan = br_vlan_find(vg, vid); if (!vlan) return -EINVAL; return __vlan_tunnel_info_add(vg, vlan, tun_id); } /* Must be protected by RTNL. * Must be called with vid in range from 1 to 4094 inclusive. */ int nbp_vlan_tunnel_info_delete(const struct net_bridge_port *port, u16 vid) { struct net_bridge_vlan_group *vg; struct net_bridge_vlan *v; ASSERT_RTNL(); vg = nbp_vlan_group(port); v = br_vlan_find(vg, vid); if (!v) return -ENOENT; vlan_tunnel_info_del(vg, v); return 0; } static void __vlan_tunnel_info_flush(struct net_bridge_vlan_group *vg) { struct net_bridge_vlan *vlan, *tmp; list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) vlan_tunnel_info_del(vg, vlan); } void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port) { struct net_bridge_vlan_group *vg; ASSERT_RTNL(); vg = nbp_vlan_group(port); __vlan_tunnel_info_flush(vg); } int vlan_tunnel_init(struct net_bridge_vlan_group *vg) { return rhashtable_init(&vg->tunnel_hash, &br_vlan_tunnel_rht_params); } void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg) { rhashtable_destroy(&vg->tunnel_hash); } void br_handle_ingress_vlan_tunnel(struct sk_buff *skb, struct net_bridge_port *p, struct net_bridge_vlan_group *vg) { struct ip_tunnel_info *tinfo = skb_tunnel_info(skb); struct net_bridge_vlan *vlan; if (!vg || !tinfo) return; /* if already tagged, ignore */ if (skb_vlan_tagged(skb)) return; /* lookup vid, given tunnel id */ vlan = br_vlan_tunnel_lookup(&vg->tunnel_hash, tinfo->key.tun_id); if (!vlan) return; skb_dst_drop(skb); __vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid); } int br_handle_egress_vlan_tunnel(struct sk_buff *skb, struct net_bridge_vlan *vlan) { IP_TUNNEL_DECLARE_FLAGS(flags) = { }; struct metadata_dst *tunnel_dst; __be64 tunnel_id; if (!vlan) return 0; tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id); if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb))) return 0; skb_dst_drop(skb); /* For 802.1ad (QinQ), skb_vlan_pop() incorrectly moves the C-VLAN * from payload to hwaccel after clearing S-VLAN. We only need to * clear the hwaccel S-VLAN; the C-VLAN must stay in payload for * correct VXLAN encapsulation. This is also correct for 802.1Q * where no C-VLAN exists in payload. */ __vlan_hwaccel_clear_tag(skb); if (BR_INPUT_SKB_CB(skb)->backup_nhid) { __set_bit(IP_TUNNEL_KEY_BIT, flags); tunnel_dst = __ip_tun_set_dst(0, 0, 0, 0, 0, flags, tunnel_id, 0); if (!tunnel_dst) return -ENOMEM; tunnel_dst->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE; tunnel_dst->u.tun_info.key.nhid = BR_INPUT_SKB_CB(skb)->backup_nhid; skb_dst_set(skb, &tunnel_dst->dst); return 0; } tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst); if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst)) skb_dst_set(skb, &tunnel_dst->dst); return 0; }
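The per-vlan tunnel hash above relies on the rhashtable obj_cmpfn contract: the compare callback returns zero when the object matches the lookup key and non-zero otherwise, exactly as br_vlan_tunid_cmp() does. A toy sketch of that contract with a hypothetical "my_entry" structure is shown below; it only mirrors the shape of br_vlan_tunnel_rht_params.

/* Toy sketch of the rhashtable obj_cmpfn contract (struct is hypothetical). */
#include <linux/rhashtable.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct my_entry {
	struct rhash_head node;
	__be64 id;
};

static int my_entry_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct my_entry *e = ptr;
	__be64 key = *(__be64 *)arg->key;

	return e->id != key;	/* 0 == match, as in br_vlan_tunid_cmp() */
}

static const struct rhashtable_params my_rht_params = {
	.head_offset		= offsetof(struct my_entry, node),
	.key_offset		= offsetof(struct my_entry, id),
	.key_len		= sizeof(__be64),
	.obj_cmpfn		= my_entry_cmp,
	.automatic_shrinking	= true,
};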
17 2 15 2 8 92 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 /* SPDX-License-Identifier: GPL-2.0-only */ /* * VMware VMCI driver (vmciContext.h) * * Copyright (C) 2012 VMware, Inc. All rights reserved. */ #ifndef _VMCI_CONTEXT_H_ #define _VMCI_CONTEXT_H_ #include <linux/vmw_vmci_defs.h> #include <linux/atomic.h> #include <linux/kref.h> #include <linux/types.h> #include <linux/wait.h> #include "vmci_handle_array.h" #include "vmci_datagram.h" /* Used to determine what checkpoint state to get and set. */ enum { VMCI_NOTIFICATION_CPT_STATE = 1, VMCI_WELLKNOWN_CPT_STATE = 2, VMCI_DG_OUT_STATE = 3, VMCI_DG_IN_STATE = 4, VMCI_DG_IN_SIZE_STATE = 5, VMCI_DOORBELL_CPT_STATE = 6, }; /* Host specific struct used for signalling */ struct vmci_host { wait_queue_head_t wait_queue; }; struct vmci_handle_list { struct list_head node; struct vmci_handle handle; }; struct vmci_ctx { struct list_head list_item; /* For global VMCI list. */ u32 cid; struct kref kref; struct list_head datagram_queue; /* Head of per VM queue. */ u32 pending_datagrams; size_t datagram_queue_size; /* Size of datagram queue in bytes. */ /* * Version of the code that created * this context; e.g., VMX. */ int user_version; spinlock_t lock; /* Locks callQueue and handle_arrays. */ /* * queue_pairs attached to. The array of * handles for queue pairs is accessed * from the code for QP API, and there * it is protected by the QP lock. It * is also accessed from the context * clean up path, which does not * require a lock. VMCILock is not * used to protect the QP array field. */ struct vmci_handle_arr *queue_pair_array; /* Doorbells created by context. */ struct vmci_handle_arr *doorbell_array; /* Doorbells pending for context. */ struct vmci_handle_arr *pending_doorbell_array; /* Contexts current context is subscribing to. */ struct list_head notifier_list; unsigned int n_notifiers; struct vmci_host host_context; u32 priv_flags; const struct cred *cred; bool *notify; /* Notify flag pointer - hosted only. */ struct page *notify_page; /* Page backing the notify UVA. */ }; /* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */ struct vmci_ctx_info { u32 remote_cid; int result; }; /* VMCICptBufInfo: Used to set/get current context's checkpoint state. */ struct vmci_ctx_chkpt_buf_info { u64 cpt_buf; u32 cpt_type; u32 buf_size; s32 result; u32 _pad; }; /* * VMCINotificationReceiveInfo: Used to receive pending notifications * for doorbells and queue pairs. */ struct vmci_ctx_notify_recv_info { u64 db_handle_buf_uva; u64 db_handle_buf_size; u64 qp_handle_buf_uva; u64 qp_handle_buf_size; s32 result; u32 _pad; }; /* * Utilility function that checks whether two entities are allowed * to interact. If one of them is restricted, the other one must * be trusted. 
*/ static inline bool vmci_deny_interaction(u32 part_one, u32 part_two) { return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) && !(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) || ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) && !(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED)); } struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags, uintptr_t event_hnd, int version, const struct cred *cred); void vmci_ctx_destroy(struct vmci_ctx *context); bool vmci_ctx_supports_host_qp(struct vmci_ctx *context); int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg); int vmci_ctx_dequeue_datagram(struct vmci_ctx *context, size_t *max_size, struct vmci_datagram **dg); struct vmci_ctx *vmci_ctx_get(u32 cid); void vmci_ctx_put(struct vmci_ctx *context); bool vmci_ctx_exists(u32 cid); int vmci_ctx_add_notification(u32 context_id, u32 remote_cid); int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid); int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type, u32 *num_cids, void **cpt_buf_ptr); int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type, u32 num_cids, void *cpt_buf); int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle); int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle); bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle); void vmci_ctx_check_signal_notify(struct vmci_ctx *context); void vmci_ctx_unset_notify(struct vmci_ctx *context); int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle); int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle); int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle, u32 src_priv_flags); int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr **db_handle_array, struct vmci_handle_arr **qp_handle_array); void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr *db_handle_array, struct vmci_handle_arr *qp_handle_array, bool success); static inline u32 vmci_ctx_get_id(struct vmci_ctx *context) { if (!context) return VMCI_INVALID_ID; return context->cid; } #endif /* _VMCI_CONTEXT_H_ */
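vmci_deny_interaction() denies an interaction only when one side is RESTRICTED and the other side lacks TRUSTED. The standalone userspace sketch below re-implements the predicate with local stand-in flag values (not the kernel definitions) purely to spell out that truth table.

/* Standalone illustration of the vmci_deny_interaction() truth table. */
#include <assert.h>
#include <stdbool.h>

#define RESTRICTED 0x01		/* stand-in for VMCI_PRIVILEGE_FLAG_RESTRICTED */
#define TRUSTED    0x02		/* stand-in for VMCI_PRIVILEGE_FLAG_TRUSTED */

static bool deny(unsigned int p1, unsigned int p2)
{
	return ((p1 & RESTRICTED) && !(p2 & TRUSTED)) ||
	       ((p2 & RESTRICTED) && !(p1 & TRUSTED));
}

int main(void)
{
	assert(!deny(0, 0));			/* neither restricted: allowed */
	assert(deny(RESTRICTED, 0));		/* restricted vs. untrusted: denied */
	assert(!deny(RESTRICTED, TRUSTED));	/* restricted vs. trusted: allowed */
	assert(deny(RESTRICTED, RESTRICTED));	/* both restricted, neither trusted */
	return 0;
}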
2 2 2 1 2 3 1 2 1 1 1 1 1 3 109 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 // SPDX-License-Identifier: GPL-2.0-only /* * Generic netlink handshake service * * Author: Chuck Lever <chuck.lever@oracle.com> * * Copyright (c) 2023, Oracle and/or its affiliates. */ #include <linux/types.h> #include <linux/socket.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/mm.h> #include <net/sock.h> #include <net/genetlink.h> #include <net/netns/generic.h> #include <kunit/visibility.h> #include <uapi/linux/handshake.h> #include "handshake.h" #include "genl.h" #include <trace/events/handshake.h> /** * handshake_genl_notify - Notify handlers that a request is waiting * @net: target network namespace * @proto: handshake protocol * @flags: memory allocation control flags * * Returns zero on success or a negative errno if notification failed. */ int handshake_genl_notify(struct net *net, const struct handshake_proto *proto, gfp_t flags) { struct sk_buff *msg; void *hdr; /* Disable notifications during unit testing */ if (!test_bit(HANDSHAKE_F_PROTO_NOTIFY, &proto->hp_flags)) return 0; if (!genl_has_listeners(&handshake_nl_family, net, proto->hp_handler_class)) return -ESRCH; msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, flags); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, 0, 0, &handshake_nl_family, 0, HANDSHAKE_CMD_READY); if (!hdr) goto out_free; if (nla_put_u32(msg, HANDSHAKE_A_ACCEPT_HANDLER_CLASS, proto->hp_handler_class) < 0) { genlmsg_cancel(msg, hdr); goto out_free; } genlmsg_end(msg, hdr); return genlmsg_multicast_netns(&handshake_nl_family, net, msg, 0, proto->hp_handler_class, flags); out_free: nlmsg_free(msg); return -EMSGSIZE; } /** * handshake_genl_put - Create a generic netlink message header * @msg: buffer in which to create the header * @info: generic netlink message context * * Returns a ready-to-use header, or NULL. 
*/ struct nlmsghdr *handshake_genl_put(struct sk_buff *msg, struct genl_info *info) { return genlmsg_put(msg, info->snd_portid, info->snd_seq, &handshake_nl_family, 0, info->genlhdr->cmd); } EXPORT_SYMBOL(handshake_genl_put); int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info) { struct net *net = sock_net(skb->sk); struct handshake_net *hn = handshake_pernet(net); struct handshake_req *req = NULL; struct socket *sock; int class, err; err = -EOPNOTSUPP; if (!hn) goto out_status; err = -EINVAL; if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_ACCEPT_HANDLER_CLASS)) goto out_status; class = nla_get_u32(info->attrs[HANDSHAKE_A_ACCEPT_HANDLER_CLASS]); err = -EAGAIN; req = handshake_req_next(hn, class); if (req) { sock = req->hr_sk->sk_socket; FD_PREPARE(fdf, O_CLOEXEC, sock->file); if (fdf.err) { err = fdf.err; goto out_complete; } get_file(sock->file); /* FD_PREPARE() consumes a reference. */ err = req->hr_proto->hp_accept(req, info, fd_prepare_fd(fdf)); if (err) goto out_complete; /* Automatic cleanup handles fput */ trace_handshake_cmd_accept(net, req, req->hr_sk, fd_prepare_fd(fdf)); fd_publish(fdf); return 0; } out_complete: if (req) handshake_complete(req, -EIO, NULL); out_status: trace_handshake_cmd_accept_err(net, req, NULL, err); return err; } int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info) { struct net *net = sock_net(skb->sk); struct handshake_req *req; struct socket *sock; int fd, status, err; if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD)) return -EINVAL; fd = nla_get_s32(info->attrs[HANDSHAKE_A_DONE_SOCKFD]); sock = sockfd_lookup(fd, &err); if (!sock) return err; req = handshake_req_hash_lookup(sock->sk); if (!req) { err = -EBUSY; trace_handshake_cmd_done_err(net, req, sock->sk, err); sockfd_put(sock); return err; } trace_handshake_cmd_done(net, req, sock->sk, fd); status = -EIO; if (info->attrs[HANDSHAKE_A_DONE_STATUS]) status = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_STATUS]); handshake_complete(req, status, info); sockfd_put(sock); return 0; } static unsigned int handshake_net_id; static int __net_init handshake_net_init(struct net *net) { struct handshake_net *hn = net_generic(net, handshake_net_id); unsigned long tmp; struct sysinfo si; /* * Arbitrary limit to prevent handshakes that do not make * progress from clogging up the system. The cap scales up * with the amount of physical memory on the system. */ si_meminfo(&si); tmp = si.totalram / (25 * si.mem_unit); hn->hn_pending_max = clamp(tmp, 3UL, 50UL); spin_lock_init(&hn->hn_lock); hn->hn_pending = 0; hn->hn_flags = 0; INIT_LIST_HEAD(&hn->hn_requests); return 0; } static void __net_exit handshake_net_exit(struct net *net) { struct handshake_net *hn = net_generic(net, handshake_net_id); struct handshake_req *req; LIST_HEAD(requests); /* * Drain the net's pending list. Requests that have been * accepted and are in progress will be destroyed when * the socket is closed. */ spin_lock(&hn->hn_lock); set_bit(HANDSHAKE_F_NET_DRAINING, &hn->hn_flags); list_splice_init(&requests, &hn->hn_requests); spin_unlock(&hn->hn_lock); while (!list_empty(&requests)) { req = list_first_entry(&requests, struct handshake_req, hr_list); list_del(&req->hr_list); /* * Requests on this list have not yet been * accepted, so they do not have an fd to put. 
*/ handshake_complete(req, -ETIMEDOUT, NULL); } } static struct pernet_operations handshake_genl_net_ops = { .init = handshake_net_init, .exit = handshake_net_exit, .id = &handshake_net_id, .size = sizeof(struct handshake_net), }; /** * handshake_pernet - Get the handshake private per-net structure * @net: network namespace * * Returns a pointer to the net's private per-net structure for the * handshake module, or NULL if handshake_init() failed. */ struct handshake_net *handshake_pernet(struct net *net) { return handshake_net_id ? net_generic(net, handshake_net_id) : NULL; } EXPORT_SYMBOL_IF_KUNIT(handshake_pernet); static int __init handshake_init(void) { int ret; ret = handshake_req_hash_init(); if (ret) { pr_warn("handshake: hash initialization failed (%d)\n", ret); return ret; } ret = genl_register_family(&handshake_nl_family); if (ret) { pr_warn("handshake: netlink registration failed (%d)\n", ret); handshake_req_hash_destroy(); return ret; } /* * ORDER: register_pernet_subsys must be done last. * * If initialization does not make it past pernet_subsys * registration, then handshake_net_id will remain 0. That * shunts the handshake consumer API to return ENOTSUPP * to prevent it from dereferencing something that hasn't * been allocated. */ ret = register_pernet_subsys(&handshake_genl_net_ops); if (ret) { pr_warn("handshake: pernet registration failed (%d)\n", ret); genl_unregister_family(&handshake_nl_family); handshake_req_hash_destroy(); } return ret; } static void __exit handshake_exit(void) { unregister_pernet_subsys(&handshake_genl_net_ops); handshake_net_id = 0; handshake_req_hash_destroy(); genl_unregister_family(&handshake_nl_family); } module_init(handshake_init); module_exit(handshake_exit);
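handshake_net_init() caps pending requests per namespace at clamp(si.totalram / (25 * si.mem_unit), 3, 50), so the limit scales with memory but never leaves the 3..50 range. The userspace sketch below only illustrates that clamp arithmetic; the sysinfo numbers are made up.

/* Illustrative sketch of the hn_pending_max clamp (made-up numbers). */
#include <stdio.h>

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long mem_unit = 4096;		/* hypothetical */
	unsigned long totalram = 1024;		/* tiny system */

	/* 1024 / (25 * 4096) == 0, clamped up to 3 */
	printf("%lu\n", clamp_ul(totalram / (25 * mem_unit), 3UL, 50UL));

	totalram = 100UL * 1000 * 1000;		/* large system */
	/* 100000000 / 102400 == 976, clamped down to 50 */
	printf("%lu\n", clamp_ul(totalram / (25 * mem_unit), 3UL, 50UL));
	return 0;
}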
829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 // SPDX-License-Identifier: GPL-2.0-only /* * Input driver to ExplorerPS/2 device driver module. * * Copyright (c) 1999-2002 Vojtech Pavlik * Copyright (c) 2004 Dmitry Torokhov */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MOUSEDEV_MINOR_BASE 32 #define MOUSEDEV_MINORS 31 #define MOUSEDEV_MIX 63 #include <linux/bitops.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/random.h> #include <linux/major.h> #include <linux/device.h> #include <linux/cdev.h> #include <linux/kernel.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces"); MODULE_LICENSE("GPL"); #ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_X #define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024 #endif #ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_Y #define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768 #endif static int xres = CONFIG_INPUT_MOUSEDEV_SCREEN_X; module_param(xres, uint, 0644); MODULE_PARM_DESC(xres, "Horizontal screen resolution"); static int yres = CONFIG_INPUT_MOUSEDEV_SCREEN_Y; module_param(yres, uint, 0644); MODULE_PARM_DESC(yres, "Vertical screen resolution"); static unsigned tap_time = 200; module_param(tap_time, uint, 0644); MODULE_PARM_DESC(tap_time, "Tap time for touchpads in absolute mode (msecs)"); struct mousedev_hw_data { int dx, dy, dz; int x, y; int abs_event; unsigned long buttons; }; struct mousedev { int open; struct input_handle handle; wait_queue_head_t wait; struct list_head client_list; spinlock_t client_lock; /* protects client_list */ struct mutex mutex; struct device dev; struct cdev cdev; bool exist; struct list_head mixdev_node; bool opened_by_mixdev; struct mousedev_hw_data packet; unsigned int pkt_count; int old_x[4], old_y[4]; int frac_dx, frac_dy; unsigned long touch; int (*open_device)(struct mousedev *mousedev); void (*close_device)(struct mousedev *mousedev); }; enum mousedev_emul { MOUSEDEV_EMUL_PS2, MOUSEDEV_EMUL_IMPS, MOUSEDEV_EMUL_EXPS }; struct mousedev_motion { int dx, dy, dz; unsigned long buttons; }; #define PACKET_QUEUE_LEN 16 struct mousedev_client { struct fasync_struct *fasync; struct mousedev *mousedev; struct list_head node; struct 
mousedev_motion packets[PACKET_QUEUE_LEN]; unsigned int head, tail; spinlock_t packet_lock; int pos_x, pos_y; u8 ps2[6]; unsigned char ready, buffer, bufsiz; unsigned char imexseq, impsseq; enum mousedev_emul mode; unsigned long last_buttons; }; #define MOUSEDEV_SEQ_LEN 6 static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 }; static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 }; static struct mousedev *mousedev_mix; static LIST_HEAD(mousedev_mix_list); #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03]) #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03]) static void mousedev_touchpad_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value) { int size, tmp; enum { FRACTION_DENOM = 128 }; switch (code) { case ABS_X: fx(0) = value; if (mousedev->touch && mousedev->pkt_count >= 2) { size = input_abs_get_max(dev, ABS_X) - input_abs_get_min(dev, ABS_X); if (size == 0) size = 256 * 2; tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size; tmp += mousedev->frac_dx; mousedev->packet.dx = tmp / FRACTION_DENOM; mousedev->frac_dx = tmp - mousedev->packet.dx * FRACTION_DENOM; } break; case ABS_Y: fy(0) = value; if (mousedev->touch && mousedev->pkt_count >= 2) { /* use X size for ABS_Y to keep the same scale */ size = input_abs_get_max(dev, ABS_X) - input_abs_get_min(dev, ABS_X); if (size == 0) size = 256 * 2; tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size; tmp += mousedev->frac_dy; mousedev->packet.dy = tmp / FRACTION_DENOM; mousedev->frac_dy = tmp - mousedev->packet.dy * FRACTION_DENOM; } break; } } static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value) { int min, max, size; switch (code) { case ABS_X: min = input_abs_get_min(dev, ABS_X); max = input_abs_get_max(dev, ABS_X); size = max - min; if (size == 0) size = xres ? : 1; value = clamp(value, min, max); mousedev->packet.x = ((value - min) * xres) / size; mousedev->packet.abs_event = 1; break; case ABS_Y: min = input_abs_get_min(dev, ABS_Y); max = input_abs_get_max(dev, ABS_Y); size = max - min; if (size == 0) size = yres ? 
: 1; value = clamp(value, min, max); mousedev->packet.y = yres - ((value - min) * yres) / size; mousedev->packet.abs_event = 1; break; } } static void mousedev_rel_event(struct mousedev *mousedev, unsigned int code, int value) { switch (code) { case REL_X: mousedev->packet.dx += value; break; case REL_Y: mousedev->packet.dy -= value; break; case REL_WHEEL: mousedev->packet.dz -= value; break; } } static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int value) { int index; switch (code) { case BTN_TOUCH: case BTN_0: case BTN_LEFT: index = 0; break; case BTN_STYLUS: case BTN_1: case BTN_RIGHT: index = 1; break; case BTN_2: case BTN_FORWARD: case BTN_STYLUS2: case BTN_MIDDLE: index = 2; break; case BTN_3: case BTN_BACK: case BTN_SIDE: index = 3; break; case BTN_4: case BTN_EXTRA: index = 4; break; default: return; } if (value) { set_bit(index, &mousedev->packet.buttons); set_bit(index, &mousedev_mix->packet.buttons); } else { clear_bit(index, &mousedev->packet.buttons); clear_bit(index, &mousedev_mix->packet.buttons); } } static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_hw_data *packet) { struct mousedev_client *client; struct mousedev_motion *p; unsigned int new_head; int wake_readers = 0; rcu_read_lock(); list_for_each_entry_rcu(client, &mousedev->client_list, node) { /* Just acquire the lock, interrupts already disabled */ spin_lock(&client->packet_lock); p = &client->packets[client->head]; if (client->ready && p->buttons != mousedev->packet.buttons) { new_head = (client->head + 1) % PACKET_QUEUE_LEN; if (new_head != client->tail) { p = &client->packets[client->head = new_head]; memset(p, 0, sizeof(struct mousedev_motion)); } } if (packet->abs_event) { p->dx += packet->x - client->pos_x; p->dy += packet->y - client->pos_y; client->pos_x = packet->x; client->pos_y = packet->y; } client->pos_x += packet->dx; client->pos_x = clamp_val(client->pos_x, 0, xres); client->pos_y += packet->dy; client->pos_y = clamp_val(client->pos_y, 0, yres); p->dx += packet->dx; p->dy += packet->dy; p->dz += packet->dz; p->buttons = mousedev->packet.buttons; if (p->dx || p->dy || p->dz || p->buttons != client->last_buttons) client->ready = 1; spin_unlock(&client->packet_lock); if (client->ready) { kill_fasync(&client->fasync, SIGIO, POLL_IN); wake_readers = 1; } } rcu_read_unlock(); if (wake_readers) wake_up_interruptible(&mousedev->wait); } static void mousedev_touchpad_touch(struct mousedev *mousedev, int value) { if (!value) { if (mousedev->touch && time_before(jiffies, mousedev->touch + msecs_to_jiffies(tap_time))) { /* * Toggle left button to emulate tap. * We rely on the fact that mousedev_mix always has 0 * motion packet so we won't mess current position. 
*/ set_bit(0, &mousedev->packet.buttons); set_bit(0, &mousedev_mix->packet.buttons); mousedev_notify_readers(mousedev, &mousedev_mix->packet); mousedev_notify_readers(mousedev_mix, &mousedev_mix->packet); clear_bit(0, &mousedev->packet.buttons); clear_bit(0, &mousedev_mix->packet.buttons); } mousedev->touch = mousedev->pkt_count = 0; mousedev->frac_dx = 0; mousedev->frac_dy = 0; } else if (!mousedev->touch) mousedev->touch = jiffies; } static void mousedev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { struct mousedev *mousedev = handle->private; switch (type) { case EV_ABS: /* Ignore joysticks */ if (test_bit(BTN_TRIGGER, handle->dev->keybit)) return; if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) mousedev_touchpad_event(handle->dev, mousedev, code, value); else mousedev_abs_event(handle->dev, mousedev, code, value); break; case EV_REL: mousedev_rel_event(mousedev, code, value); break; case EV_KEY: if (value != 2) { if (code == BTN_TOUCH && test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) mousedev_touchpad_touch(mousedev, value); else mousedev_key_event(mousedev, code, value); } break; case EV_SYN: if (code == SYN_REPORT) { if (mousedev->touch) { mousedev->pkt_count++; /* * Input system eats duplicate events, * but we need all of them to do correct * averaging so apply present one forward */ fx(0) = fx(1); fy(0) = fy(1); } mousedev_notify_readers(mousedev, &mousedev->packet); mousedev_notify_readers(mousedev_mix, &mousedev->packet); mousedev->packet.dx = mousedev->packet.dy = mousedev->packet.dz = 0; mousedev->packet.abs_event = 0; } break; } } static int mousedev_fasync(int fd, struct file *file, int on) { struct mousedev_client *client = file->private_data; return fasync_helper(fd, file, on, &client->fasync); } static void mousedev_free(struct device *dev) { struct mousedev *mousedev = container_of(dev, struct mousedev, dev); input_put_device(mousedev->handle.dev); kfree(mousedev); } static int mousedev_open_device(struct mousedev *mousedev) { int retval; retval = mutex_lock_interruptible(&mousedev->mutex); if (retval) return retval; if (!mousedev->exist) retval = -ENODEV; else if (!mousedev->open++) { retval = input_open_device(&mousedev->handle); if (retval) mousedev->open--; } mutex_unlock(&mousedev->mutex); return retval; } static void mousedev_close_device(struct mousedev *mousedev) { mutex_lock(&mousedev->mutex); if (mousedev->exist && !--mousedev->open) input_close_device(&mousedev->handle); mutex_unlock(&mousedev->mutex); } /* * Open all available devices so they can all be multiplexed in one. * stream. Note that this function is called with mousedev_mix->mutex * held. */ static int mixdev_open_devices(struct mousedev *mixdev) { int error; error = mutex_lock_interruptible(&mixdev->mutex); if (error) return error; if (!mixdev->open++) { struct mousedev *mousedev; list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { if (!mousedev->opened_by_mixdev) { if (mousedev_open_device(mousedev)) continue; mousedev->opened_by_mixdev = true; } } } mutex_unlock(&mixdev->mutex); return 0; } /* * Close all devices that were opened as part of multiplexed * device. Note that this function is called with mousedev_mix->mutex * held. 
*/ static void mixdev_close_devices(struct mousedev *mixdev) { mutex_lock(&mixdev->mutex); if (!--mixdev->open) { struct mousedev *mousedev; list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { if (mousedev->opened_by_mixdev) { mousedev->opened_by_mixdev = false; mousedev_close_device(mousedev); } } } mutex_unlock(&mixdev->mutex); } static void mousedev_attach_client(struct mousedev *mousedev, struct mousedev_client *client) { spin_lock(&mousedev->client_lock); list_add_tail_rcu(&client->node, &mousedev->client_list); spin_unlock(&mousedev->client_lock); } static void mousedev_detach_client(struct mousedev *mousedev, struct mousedev_client *client) { spin_lock(&mousedev->client_lock); list_del_rcu(&client->node); spin_unlock(&mousedev->client_lock); synchronize_rcu(); } static int mousedev_release(struct inode *inode, struct file *file) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; mousedev_detach_client(mousedev, client); kfree(client); mousedev->close_device(mousedev); return 0; } static int mousedev_open(struct inode *inode, struct file *file) { struct mousedev_client *client; struct mousedev *mousedev; int error; #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX if (imajor(inode) == MISC_MAJOR) mousedev = mousedev_mix; else #endif mousedev = container_of(inode->i_cdev, struct mousedev, cdev); client = kzalloc_obj(struct mousedev_client); if (!client) return -ENOMEM; spin_lock_init(&client->packet_lock); client->pos_x = xres / 2; client->pos_y = yres / 2; client->mousedev = mousedev; mousedev_attach_client(mousedev, client); error = mousedev->open_device(mousedev); if (error) goto err_free_client; file->private_data = client; stream_open(inode, file); return 0; err_free_client: mousedev_detach_client(mousedev, client); kfree(client); return error; } static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data) { struct mousedev_motion *p = &client->packets[client->tail]; s8 dx, dy, dz; dx = clamp_val(p->dx, -127, 127); p->dx -= dx; dy = clamp_val(p->dy, -127, 127); p->dy -= dy; ps2_data[0] = BIT(3); ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2); ps2_data[0] |= p->buttons & 0x07; ps2_data[1] = dx; ps2_data[2] = dy; switch (client->mode) { case MOUSEDEV_EMUL_EXPS: dz = clamp_val(p->dz, -7, 7); p->dz -= dz; ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1); client->bufsiz = 4; break; case MOUSEDEV_EMUL_IMPS: dz = clamp_val(p->dz, -127, 127); p->dz -= dz; ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); ps2_data[3] = dz; client->bufsiz = 4; break; case MOUSEDEV_EMUL_PS2: default: p->dz = 0; ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); client->bufsiz = 3; break; } if (!p->dx && !p->dy && !p->dz) { if (client->tail == client->head) { client->ready = 0; client->last_buttons = p->buttons; } else client->tail = (client->tail + 1) % PACKET_QUEUE_LEN; } } static void mousedev_generate_response(struct mousedev_client *client, int command) { client->ps2[0] = 0xfa; /* ACK */ switch (command) { case 0xeb: /* Poll */ mousedev_packet(client, &client->ps2[1]); client->bufsiz++; /* account for leading ACK */ break; case 0xf2: /* Get ID */ switch (client->mode) { case MOUSEDEV_EMUL_PS2: client->ps2[1] = 0; break; case MOUSEDEV_EMUL_IMPS: client->ps2[1] = 3; break; case MOUSEDEV_EMUL_EXPS: client->ps2[1] = 4; break; } client->bufsiz = 2; break; case 0xe9: /* Get info */ client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200; client->bufsiz = 4; break; case 0xff: /* 
Reset */ client->impsseq = client->imexseq = 0; client->mode = MOUSEDEV_EMUL_PS2; client->ps2[1] = 0xaa; client->ps2[2] = 0x00; client->bufsiz = 3; break; default: client->bufsiz = 1; break; } client->buffer = client->bufsiz; } static ssize_t mousedev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct mousedev_client *client = file->private_data; unsigned char c; unsigned int i; for (i = 0; i < count; i++) { if (get_user(c, buffer + i)) return -EFAULT; spin_lock_irq(&client->packet_lock); if (c == mousedev_imex_seq[client->imexseq]) { if (++client->imexseq == MOUSEDEV_SEQ_LEN) { client->imexseq = 0; client->mode = MOUSEDEV_EMUL_EXPS; } } else client->imexseq = 0; if (c == mousedev_imps_seq[client->impsseq]) { if (++client->impsseq == MOUSEDEV_SEQ_LEN) { client->impsseq = 0; client->mode = MOUSEDEV_EMUL_IMPS; } } else client->impsseq = 0; mousedev_generate_response(client, c); spin_unlock_irq(&client->packet_lock); cond_resched(); } kill_fasync(&client->fasync, SIGIO, POLL_IN); wake_up_interruptible(&client->mousedev->wait); return count; } static ssize_t mousedev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; u8 data[sizeof(client->ps2)]; int retval = 0; if (!client->ready && !client->buffer && mousedev->exist && (file->f_flags & O_NONBLOCK)) return -EAGAIN; retval = wait_event_interruptible(mousedev->wait, !mousedev->exist || client->ready || client->buffer); if (retval) return retval; if (!mousedev->exist) return -ENODEV; spin_lock_irq(&client->packet_lock); if (!client->buffer && client->ready) { mousedev_packet(client, client->ps2); client->buffer = client->bufsiz; } if (count > client->buffer) count = client->buffer; memcpy(data, client->ps2 + client->bufsiz - client->buffer, count); client->buffer -= count; spin_unlock_irq(&client->packet_lock); if (copy_to_user(buffer, data, count)) return -EFAULT; return count; } /* No kernel lock - fine */ static __poll_t mousedev_poll(struct file *file, poll_table *wait) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; __poll_t mask; poll_wait(file, &mousedev->wait, wait); mask = mousedev->exist ? EPOLLOUT | EPOLLWRNORM : EPOLLHUP | EPOLLERR; if (client->ready || client->buffer) mask |= EPOLLIN | EPOLLRDNORM; return mask; } static const struct file_operations mousedev_fops = { .owner = THIS_MODULE, .read = mousedev_read, .write = mousedev_write, .poll = mousedev_poll, .open = mousedev_open, .release = mousedev_release, .fasync = mousedev_fasync, .llseek = noop_llseek, }; /* * Mark device non-existent. This disables writes, ioctls and * prevents new users from opening the device. Already posted * blocking reads will stay, however new ones will fail. */ static void mousedev_mark_dead(struct mousedev *mousedev) { mutex_lock(&mousedev->mutex); mousedev->exist = false; mutex_unlock(&mousedev->mutex); } /* * Wake up users waiting for IO so they can disconnect from * dead device. 
*/ static void mousedev_hangup(struct mousedev *mousedev) { struct mousedev_client *client; spin_lock(&mousedev->client_lock); list_for_each_entry(client, &mousedev->client_list, node) kill_fasync(&client->fasync, SIGIO, POLL_HUP); spin_unlock(&mousedev->client_lock); wake_up_interruptible(&mousedev->wait); } static void mousedev_cleanup(struct mousedev *mousedev) { struct input_handle *handle = &mousedev->handle; mousedev_mark_dead(mousedev); mousedev_hangup(mousedev); /* mousedev is marked dead so no one else accesses mousedev->open */ if (mousedev->open) input_close_device(handle); } static int mousedev_reserve_minor(bool mixdev) { int minor; if (mixdev) { minor = input_get_new_minor(MOUSEDEV_MIX, 1, false); if (minor < 0) pr_err("failed to reserve mixdev minor: %d\n", minor); } else { minor = input_get_new_minor(MOUSEDEV_MINOR_BASE, MOUSEDEV_MINORS, true); if (minor < 0) pr_err("failed to reserve new minor: %d\n", minor); } return minor; } static struct mousedev *mousedev_create(struct input_dev *dev, struct input_handler *handler, bool mixdev) { struct mousedev *mousedev; int minor; int error; minor = mousedev_reserve_minor(mixdev); if (minor < 0) { error = minor; goto err_out; } mousedev = kzalloc_obj(struct mousedev); if (!mousedev) { error = -ENOMEM; goto err_free_minor; } INIT_LIST_HEAD(&mousedev->client_list); INIT_LIST_HEAD(&mousedev->mixdev_node); spin_lock_init(&mousedev->client_lock); mutex_init(&mousedev->mutex); lockdep_set_subclass(&mousedev->mutex, mixdev ? SINGLE_DEPTH_NESTING : 0); init_waitqueue_head(&mousedev->wait); if (mixdev) { dev_set_name(&mousedev->dev, "mice"); mousedev->open_device = mixdev_open_devices; mousedev->close_device = mixdev_close_devices; } else { int dev_no = minor; /* Normalize device number if it falls into legacy range */ if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) dev_no -= MOUSEDEV_MINOR_BASE; dev_set_name(&mousedev->dev, "mouse%d", dev_no); mousedev->open_device = mousedev_open_device; mousedev->close_device = mousedev_close_device; } mousedev->exist = true; mousedev->handle.dev = input_get_device(dev); mousedev->handle.name = dev_name(&mousedev->dev); mousedev->handle.handler = handler; mousedev->handle.private = mousedev; mousedev->dev.class = &input_class; if (dev) mousedev->dev.parent = &dev->dev; mousedev->dev.devt = MKDEV(INPUT_MAJOR, minor); mousedev->dev.release = mousedev_free; device_initialize(&mousedev->dev); if (!mixdev) { error = input_register_handle(&mousedev->handle); if (error) goto err_free_mousedev; } cdev_init(&mousedev->cdev, &mousedev_fops); error = cdev_device_add(&mousedev->cdev, &mousedev->dev); if (error) goto err_cleanup_mousedev; return mousedev; err_cleanup_mousedev: mousedev_cleanup(mousedev); if (!mixdev) input_unregister_handle(&mousedev->handle); err_free_mousedev: put_device(&mousedev->dev); err_free_minor: input_free_minor(minor); err_out: return ERR_PTR(error); } static void mousedev_destroy(struct mousedev *mousedev) { cdev_device_del(&mousedev->cdev, &mousedev->dev); mousedev_cleanup(mousedev); input_free_minor(MINOR(mousedev->dev.devt)); if (mousedev != mousedev_mix) input_unregister_handle(&mousedev->handle); put_device(&mousedev->dev); } static int mixdev_add_device(struct mousedev *mousedev) { int retval; retval = mutex_lock_interruptible(&mousedev_mix->mutex); if (retval) return retval; if (mousedev_mix->open) { retval = mousedev_open_device(mousedev); if (retval) goto out; mousedev->opened_by_mixdev = true; } get_device(&mousedev->dev); list_add_tail(&mousedev->mixdev_node, 
&mousedev_mix_list); out: mutex_unlock(&mousedev_mix->mutex); return retval; } static void mixdev_remove_device(struct mousedev *mousedev) { mutex_lock(&mousedev_mix->mutex); if (mousedev->opened_by_mixdev) { mousedev->opened_by_mixdev = false; mousedev_close_device(mousedev); } list_del_init(&mousedev->mixdev_node); mutex_unlock(&mousedev_mix->mutex); put_device(&mousedev->dev); } static int mousedev_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct mousedev *mousedev; int error; mousedev = mousedev_create(dev, handler, false); if (IS_ERR(mousedev)) return PTR_ERR(mousedev); error = mixdev_add_device(mousedev); if (error) { mousedev_destroy(mousedev); return error; } return 0; } static void mousedev_disconnect(struct input_handle *handle) { struct mousedev *mousedev = handle->private; mixdev_remove_device(mousedev); mousedev_destroy(mousedev); } static const struct input_device_id mousedev_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_RELBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) }, .keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) }, .relbit = { BIT_MASK(REL_X) | BIT_MASK(REL_Y) }, }, /* A mouse like device, at least one button, two relative axes */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_RELBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) }, .relbit = { BIT_MASK(REL_WHEEL) }, }, /* A separate scrollwheel */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, /* A tablet like device, at least touch detection, two absolute axes */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_TOOL_FINGER)] = BIT_MASK(BTN_TOOL_FINGER) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) | BIT_MASK(ABS_PRESSURE) | BIT_MASK(ABS_TOOL_WIDTH) }, }, /* A touchpad */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, /* Mouse-like device with absolute X and Y but ordinary clicks, like hp ILO2 High Performance mouse */ { }, /* Terminating entry */ }; MODULE_DEVICE_TABLE(input, mousedev_ids); static struct input_handler mousedev_handler = { .event = mousedev_event, .connect = mousedev_connect, .disconnect = mousedev_disconnect, .legacy_minors = true, .minor = MOUSEDEV_MINOR_BASE, .name = "mousedev", .id_table = mousedev_ids, }; #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX #include <linux/miscdevice.h> static struct miscdevice psaux_mouse = { .minor = PSMOUSE_MINOR, .name = "psaux", .fops = &mousedev_fops, }; static bool psaux_registered; static void __init mousedev_psaux_register(void) { int error; error = misc_register(&psaux_mouse); if (error) pr_warn("could not register psaux device, error: %d\n", error); else psaux_registered = true; } static void __exit mousedev_psaux_unregister(void) { if (psaux_registered) misc_deregister(&psaux_mouse); } #else static inline void mousedev_psaux_register(void) { } static inline void mousedev_psaux_unregister(void) { } #endif static int __init mousedev_init(void) { int error; mousedev_mix = 
mousedev_create(NULL, &mousedev_handler, true); if (IS_ERR(mousedev_mix)) return PTR_ERR(mousedev_mix); error = input_register_handler(&mousedev_handler); if (error) { mousedev_destroy(mousedev_mix); return error; } mousedev_psaux_register(); pr_info("PS/2 mouse device common for all mice\n"); return 0; } static void __exit mousedev_exit(void) { mousedev_psaux_unregister(); input_unregister_handler(&mousedev_handler); mousedev_destroy(mousedev_mix); } module_init(mousedev_init); module_exit(mousedev_exit);
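
mousedev_packet() above emits the classic PS/2 report: byte 0 carries an always-set bit 3, the X/Y sign bits (0x10/0x20) and the left/right/middle button bits (0x01/0x02/0x04), while bytes 1 and 2 are the signed X and Y deltas (a fourth byte is appended in ImPS/2 and ExplorerPS/2 modes). As a rough sketch of the consumer side, the following userspace program, which is illustrative and not part of the driver, reads /dev/input/mice and decodes the default 3-byte framing:

/* Hypothetical userspace reader for the PS/2 stream that mousedev exposes
 * on /dev/input/mice.  Assumes the default 3-byte PS/2 protocol (no ImPS/2
 * or ExplorerPS/2 escape sequence has been written to the device).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        unsigned char pkt[3];
        int fd = open("/dev/input/mice", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/input/mice");
                return 1;
        }

        while (read(fd, pkt, sizeof(pkt)) == sizeof(pkt)) {
                /* Byte 0: bit 3 always set, bits 0-2 are left/right/middle,
                 * bits 4-5 are the sign bits applied to bytes 1 and 2.
                 */
                int left   = pkt[0] & 0x01;
                int right  = pkt[0] & 0x02;
                int middle = pkt[0] & 0x04;
                int dx = (int)pkt[1] - ((pkt[0] & 0x10) ? 256 : 0);
                int dy = (int)pkt[2] - ((pkt[0] & 0x20) ? 256 : 0);

                printf("dx=%4d dy=%4d buttons:%s%s%s\n", dx, dy,
                       left ? " L" : "", middle ? " M" : "", right ? " R" : "");
        }

        close(fd);
        return 0;
}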
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
 *
 * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
 * NAT funded by Astaro.
 */

#include <linux/if.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/types.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <net/addrconf.h>
#include <net/checksum.h>
#include <net/protocol.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_redirect.h>

static unsigned int
redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
        return nf_nat_redirect_ipv6(skb, par->targinfo, xt_hooknum(par));
}

static int redirect_tg6_checkentry(const struct xt_tgchk_param *par)
{
        const struct nf_nat_range2 *range = par->targinfo;

        if (range->flags & NF_NAT_RANGE_MAP_IPS)
                return -EINVAL;

        return nf_ct_netns_get(par->net, par->family);
}

static void redirect_tg_destroy(const struct xt_tgdtor_param *par)
{
        nf_ct_netns_put(par->net, par->family);
}

static int redirect_tg4_check(const struct xt_tgchk_param *par)
{
        const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;

        if (mr->range[0].flags & NF_NAT_RANGE_MAP_IPS) {
                pr_debug("bad MAP_IPS.\n");
                return -EINVAL;
        }
        if (mr->rangesize != 1) {
                pr_debug("bad rangesize %u.\n", mr->rangesize);
                return -EINVAL;
        }
        return nf_ct_netns_get(par->net, par->family);
}

static unsigned int
redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
        const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
        struct nf_nat_range2 range = {
                .flags     = mr->range[0].flags,
                .min_proto = mr->range[0].min,
                .max_proto = mr->range[0].max,
        };

        return nf_nat_redirect_ipv4(skb, &range, xt_hooknum(par));
}

static struct xt_target redirect_tg_reg[] __read_mostly = {
        {
                .name       = "REDIRECT",
                .family     = NFPROTO_IPV6,
                .revision   = 0,
                .table      = "nat",
                .checkentry = redirect_tg6_checkentry,
                .destroy    = redirect_tg_destroy,
                .target     = redirect_tg6,
                .targetsize = sizeof(struct nf_nat_range),
                .hooks      = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
                .me         = THIS_MODULE,
        },
        {
                .name       = "REDIRECT",
                .family     = NFPROTO_IPV4,
                .revision   = 0,
                .table      = "nat",
                .target     = redirect_tg4,
                .checkentry = redirect_tg4_check,
                .destroy    = redirect_tg_destroy,
                .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat),
                .hooks      = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT),
                .me         = THIS_MODULE,
        },
};

static int __init redirect_tg_init(void)
{
        return xt_register_targets(redirect_tg_reg, ARRAY_SIZE(redirect_tg_reg));
}

static void __exit redirect_tg_exit(void)
{
        xt_unregister_targets(redirect_tg_reg, ARRAY_SIZE(redirect_tg_reg));
}

module_init(redirect_tg_init);
module_exit(redirect_tg_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Xtables: Connection redirection to localhost");
MODULE_ALIAS("ip6t_REDIRECT");
MODULE_ALIAS("ipt_REDIRECT");
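
The REDIRECT target rewrites a packet's destination to a local address, so a local proxy ends up accepting connections that were headed elsewhere (a typical rule is along the lines of `iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 3128`). A proxy that needs the pre-REDIRECT destination can ask conntrack for it with SO_ORIGINAL_DST; the fragment below is a hedged userspace illustration, with fd assumed to come from accept() on the proxy's listening socket:

/* Illustrative only: recover the original (pre-REDIRECT) destination of an
 * accepted TCP connection via SO_ORIGINAL_DST.  Error handling is trimmed.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <linux/netfilter_ipv4.h>       /* SO_ORIGINAL_DST */

static void print_original_dst(int fd)
{
        struct sockaddr_in orig;
        socklen_t len = sizeof(orig);

        if (getsockopt(fd, SOL_IP, SO_ORIGINAL_DST, &orig, &len) == 0)
                printf("original destination: %s:%u\n",
                       inet_ntoa(orig.sin_addr), ntohs(orig.sin_port));
        else
                perror("SO_ORIGINAL_DST");
}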
9 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGE_COUNTER_H #define _LINUX_PAGE_COUNTER_H #include <linux/atomic.h> #include <linux/cache.h> #include <linux/limits.h> #include <asm/page.h> struct page_counter { /* * Make sure 'usage' does not share cacheline with any other field in * v2. The memcg->memory.usage is a hot member of struct mem_cgroup. */ atomic_long_t usage; unsigned long failcnt; /* v1-only field */ CACHELINE_PADDING(_pad1_); /* effective memory.min and memory.min usage tracking */ unsigned long emin; atomic_long_t min_usage; atomic_long_t children_min_usage; /* effective memory.low and memory.low usage tracking */ unsigned long elow; atomic_long_t low_usage; atomic_long_t children_low_usage; unsigned long watermark; /* Latest cg2 reset watermark */ unsigned long local_watermark; /* Keep all the read most fields in a separete cacheline. */ CACHELINE_PADDING(_pad2_); bool protection_support; bool track_failcnt; unsigned long min; unsigned long low; unsigned long high; unsigned long max; struct page_counter *parent; } ____cacheline_internodealigned_in_smp; #if BITS_PER_LONG == 32 #define PAGE_COUNTER_MAX LONG_MAX #else #define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) #endif /* * Protection is supported only for the first counter (with id 0). */ static inline void page_counter_init(struct page_counter *counter, struct page_counter *parent, bool protection_support) { counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0); counter->max = PAGE_COUNTER_MAX; counter->parent = parent; counter->protection_support = protection_support; counter->track_failcnt = false; } static inline unsigned long page_counter_read(struct page_counter *counter) { return atomic_long_read(&counter->usage); } void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); bool page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail); void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); static inline void page_counter_set_high(struct page_counter *counter, unsigned long nr_pages) { WRITE_ONCE(counter->high, nr_pages); } int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); int page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages); static inline void page_counter_reset_watermark(struct page_counter *counter) { unsigned long usage = page_counter_read(counter); /* * Update local_watermark first, so it's always <= watermark * (modulo CPU/compiler re-ordering) */ counter->local_watermark = usage; counter->watermark = usage; } #if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM) void page_counter_calculate_protection(struct page_counter *root, struct page_counter *counter, bool recursive_protection); #else static inline void page_counter_calculate_protection(struct page_counter *root, struct page_counter *counter, bool recursive_protection) {} #endif #endif /* _LINUX_PAGE_COUNTER_H 
*/
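
page_counter_charge() and page_counter_try_charge() apply a charge to a counter and to every ancestor reachable through ->parent; the try variant fails if any level would exceed its max, reports the offending level through *fail, and unwinds the partial charges, while the watermark fields remember the highest usage seen. The standalone sketch below models that hierarchical try-charge/unwind behaviour in userspace; field and function names mirror the header, but the single-threaded logic is simplified, so treat it as an illustration only:

/* Minimal userspace model of the hierarchical try-charge/unwind pattern
 * declared above (no atomics, no protection or failcnt tracking).
 */
#include <stdbool.h>
#include <stdio.h>

struct page_counter {
        long usage;
        long max;
        long watermark;
        struct page_counter *parent;
};

static bool try_charge(struct page_counter *counter, long nr_pages,
                       struct page_counter **fail)
{
        struct page_counter *c;

        for (c = counter; c; c = c->parent) {
                long new = c->usage + nr_pages;

                if (new > c->max) {
                        *fail = c;
                        goto unwind;
                }
                c->usage = new;
                if (new > c->watermark)
                        c->watermark = new;
        }
        return true;

unwind:
        /* Undo the charges applied to the levels below the failing one. */
        for (struct page_counter *u = counter; u != c; u = u->parent)
                u->usage -= nr_pages;
        return false;
}

int main(void)
{
        struct page_counter root  = { .max = 100 };
        struct page_counter child = { .max = 1000, .parent = &root };
        struct page_counter *fail;

        printf("charge 80: %d\n", try_charge(&child, 80, &fail));  /* succeeds */
        printf("charge 30: %d\n", try_charge(&child, 30, &fail));  /* fails at root */
        printf("child usage after failed charge: %ld\n", child.usage);  /* still 80 */
        return 0;
}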
397 4 4 4 347 40 38 47 38 47 5 5 1 350 351 347 11 11 11 5 11 11 395 351 395 36 3 3 1 2 1 3 1 1 1 1 1 1 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 // SPDX-License-Identifier: GPL-2.0-only /* * Process number limiting controller for cgroups. * * Used to allow a cgroup hierarchy to stop any new processes from fork()ing * after a certain limit is reached. * * Since it is trivial to hit the task limit without hitting any kmemcg limits * in place, PIDs are a fundamental resource. As such, PID exhaustion must be * preventable in the scope of a cgroup hierarchy by allowing resource limiting * of the number of tasks in a cgroup. * * In order to use the `pids` controller, set the maximum number of tasks in * pids.max (this is not available in the root cgroup for obvious reasons). The * number of processes currently in the cgroup is given by pids.current. * Organisational operations are not blocked by cgroup policies, so it is * possible to have pids.current > pids.max. However, it is not possible to * violate a cgroup policy through fork(). fork() will return -EAGAIN if forking * would cause a cgroup policy to be violated. * * To set a cgroup to have no limit, set pids.max to "max". This is the default * for all new cgroups (N.B. that PID limits are hierarchical, so the most * stringent limit in the hierarchy is followed). * * pids.current tracks all child cgroup hierarchies, so parent/pids.current is * a superset of parent/child/pids.current. * * Copyright (C) 2015 Aleksa Sarai <cyphar@cyphar.com> */ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/atomic.h> #include <linux/cgroup.h> #include <linux/slab.h> #include <linux/sched/task.h> #define PIDS_MAX (PID_MAX_LIMIT + 1ULL) #define PIDS_MAX_STR "max" enum pidcg_event { /* Fork failed in subtree because this pids_cgroup limit was hit. 
*/ PIDCG_MAX, /* Fork failed in this pids_cgroup because ancestor limit was hit. */ PIDCG_FORKFAIL, NR_PIDCG_EVENTS, }; struct pids_cgroup { struct cgroup_subsys_state css; /* * Use 64-bit types so that we can safely represent "max" as * %PIDS_MAX = (%PID_MAX_LIMIT + 1). */ atomic64_t counter; atomic64_t limit; int64_t watermark; /* Handles for pids.events[.local] */ struct cgroup_file events_file; struct cgroup_file events_local_file; atomic64_t events[NR_PIDCG_EVENTS]; atomic64_t events_local[NR_PIDCG_EVENTS]; }; static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css) { return container_of(css, struct pids_cgroup, css); } static struct pids_cgroup *parent_pids(struct pids_cgroup *pids) { return css_pids(pids->css.parent); } static struct cgroup_subsys_state * pids_css_alloc(struct cgroup_subsys_state *parent) { struct pids_cgroup *pids; pids = kzalloc_obj(struct pids_cgroup); if (!pids) return ERR_PTR(-ENOMEM); atomic64_set(&pids->limit, PIDS_MAX); return &pids->css; } static void pids_css_free(struct cgroup_subsys_state *css) { kfree(css_pids(css)); } static void pids_update_watermark(struct pids_cgroup *p, int64_t nr_pids) { /* * This is racy, but we don't need perfectly accurate tallying of * the watermark, and this lets us avoid extra atomic overhead. */ if (nr_pids > READ_ONCE(p->watermark)) WRITE_ONCE(p->watermark, nr_pids); } /** * pids_cancel - uncharge the local pid count * @pids: the pid cgroup state * @num: the number of pids to cancel * * This function will WARN if the pid count goes under 0, because such a case is * a bug in the pids controller proper. */ static void pids_cancel(struct pids_cgroup *pids, int num) { /* * A negative count (or overflow for that matter) is invalid, * and indicates a bug in the `pids` controller proper. */ WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter)); } /** * pids_uncharge - hierarchically uncharge the pid count * @pids: the pid cgroup state * @num: the number of pids to uncharge */ static void pids_uncharge(struct pids_cgroup *pids, int num) { struct pids_cgroup *p; for (p = pids; parent_pids(p); p = parent_pids(p)) pids_cancel(p, num); } /** * pids_charge - hierarchically charge the pid count * @pids: the pid cgroup state * @num: the number of pids to charge * * This function does *not* follow the pid limit set. It cannot fail and the new * pid count may exceed the limit. This is only used for reverting failed * attaches, where there is no other way out than violating the limit. */ static void pids_charge(struct pids_cgroup *pids, int num) { struct pids_cgroup *p; for (p = pids; parent_pids(p); p = parent_pids(p)) { int64_t new = atomic64_add_return(num, &p->counter); pids_update_watermark(p, new); } } /** * pids_try_charge - hierarchically try to charge the pid count * @pids: the pid cgroup state * @num: the number of pids to charge * @fail: storage of pid cgroup causing the fail * * This function follows the set limit. It will fail if the charge would cause * the new value to exceed the hierarchical limit. Returns 0 if the charge * succeeded, otherwise -EAGAIN. */ static int pids_try_charge(struct pids_cgroup *pids, int num, struct pids_cgroup **fail) { struct pids_cgroup *p, *q; for (p = pids; parent_pids(p); p = parent_pids(p)) { int64_t new = atomic64_add_return(num, &p->counter); int64_t limit = atomic64_read(&p->limit); /* * Since new is capped to the maximum number of pid_t, if * p->limit is %PIDS_MAX then we know that this test will never * fail. 
*/ if (new > limit) { *fail = p; goto revert; } /* * Not technically accurate if we go over limit somewhere up * the hierarchy, but that's tolerable for the watermark. */ pids_update_watermark(p, new); } return 0; revert: for (q = pids; q != p; q = parent_pids(q)) pids_cancel(q, num); pids_cancel(p, num); return -EAGAIN; } static int pids_can_attach(struct cgroup_taskset *tset) { struct task_struct *task; struct cgroup_subsys_state *dst_css; cgroup_taskset_for_each(task, dst_css, tset) { struct pids_cgroup *pids = css_pids(dst_css); struct cgroup_subsys_state *old_css; struct pids_cgroup *old_pids; /* * No need to pin @old_css between here and cancel_attach() * because cgroup core protects it from being freed before * the migration completes or fails. */ old_css = task_css(task, pids_cgrp_id); old_pids = css_pids(old_css); pids_charge(pids, 1); pids_uncharge(old_pids, 1); } return 0; } static void pids_cancel_attach(struct cgroup_taskset *tset) { struct task_struct *task; struct cgroup_subsys_state *dst_css; cgroup_taskset_for_each(task, dst_css, tset) { struct pids_cgroup *pids = css_pids(dst_css); struct cgroup_subsys_state *old_css; struct pids_cgroup *old_pids; old_css = task_css(task, pids_cgrp_id); old_pids = css_pids(old_css); pids_charge(old_pids, 1); pids_uncharge(pids, 1); } } static void pids_event(struct pids_cgroup *pids_forking, struct pids_cgroup *pids_over_limit) { struct pids_cgroup *p = pids_forking; /* Only log the first time limit is hit. */ if (atomic64_inc_return(&p->events_local[PIDCG_FORKFAIL]) == 1) { pr_info("cgroup: fork rejected by pids controller in "); pr_cont_cgroup_path(p->css.cgroup); pr_cont("\n"); } if (!cgroup_subsys_on_dfl(pids_cgrp_subsys) || cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS) { cgroup_file_notify(&p->events_local_file); return; } atomic64_inc(&pids_over_limit->events_local[PIDCG_MAX]); cgroup_file_notify(&pids_over_limit->events_local_file); for (p = pids_over_limit; parent_pids(p); p = parent_pids(p)) { atomic64_inc(&p->events[PIDCG_MAX]); cgroup_file_notify(&p->events_file); } } /* * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies * on cgroup_threadgroup_change_begin() held by the copy_process(). */ static int pids_can_fork(struct task_struct *task, struct css_set *cset) { struct pids_cgroup *pids, *pids_over_limit; int err; pids = css_pids(cset->subsys[pids_cgrp_id]); err = pids_try_charge(pids, 1, &pids_over_limit); if (err) pids_event(pids, pids_over_limit); return err; } static void pids_cancel_fork(struct task_struct *task, struct css_set *cset) { struct pids_cgroup *pids; pids = css_pids(cset->subsys[pids_cgrp_id]); pids_uncharge(pids, 1); } static void pids_release(struct task_struct *task) { struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id)); pids_uncharge(pids, 1); } static ssize_t pids_max_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct cgroup_subsys_state *css = of_css(of); struct pids_cgroup *pids = css_pids(css); int64_t limit; int err; buf = strstrip(buf); if (!strcmp(buf, PIDS_MAX_STR)) { limit = PIDS_MAX; goto set_limit; } err = kstrtoll(buf, 0, &limit); if (err) return err; if (limit < 0 || limit >= PIDS_MAX) return -EINVAL; set_limit: /* * Limit updates don't need to be mutex'd, since it isn't * critical that any racing fork()s follow the new limit. 
*/ atomic64_set(&pids->limit, limit); return nbytes; } static int pids_max_show(struct seq_file *sf, void *v) { struct cgroup_subsys_state *css = seq_css(sf); struct pids_cgroup *pids = css_pids(css); int64_t limit = atomic64_read(&pids->limit); if (limit >= PIDS_MAX) seq_printf(sf, "%s\n", PIDS_MAX_STR); else seq_printf(sf, "%lld\n", limit); return 0; } static s64 pids_current_read(struct cgroup_subsys_state *css, struct cftype *cft) { struct pids_cgroup *pids = css_pids(css); return atomic64_read(&pids->counter); } static s64 pids_peak_read(struct cgroup_subsys_state *css, struct cftype *cft) { struct pids_cgroup *pids = css_pids(css); return READ_ONCE(pids->watermark); } static int __pids_events_show(struct seq_file *sf, bool local) { struct pids_cgroup *pids = css_pids(seq_css(sf)); enum pidcg_event pe = PIDCG_MAX; atomic64_t *events; if (!cgroup_subsys_on_dfl(pids_cgrp_subsys) || cgrp_dfl_root.flags & CGRP_ROOT_PIDS_LOCAL_EVENTS) { pe = PIDCG_FORKFAIL; local = true; } events = local ? pids->events_local : pids->events; seq_printf(sf, "max %lld\n", (s64)atomic64_read(&events[pe])); return 0; } static int pids_events_show(struct seq_file *sf, void *v) { __pids_events_show(sf, false); return 0; } static int pids_events_local_show(struct seq_file *sf, void *v) { __pids_events_show(sf, true); return 0; } static struct cftype pids_files[] = { { .name = "max", .write = pids_max_write, .seq_show = pids_max_show, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "current", .read_s64 = pids_current_read, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "peak", .flags = CFTYPE_NOT_ON_ROOT, .read_s64 = pids_peak_read, }, { .name = "events", .seq_show = pids_events_show, .file_offset = offsetof(struct pids_cgroup, events_file), .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "events.local", .seq_show = pids_events_local_show, .file_offset = offsetof(struct pids_cgroup, events_local_file), .flags = CFTYPE_NOT_ON_ROOT, }, { } /* terminate */ }; static struct cftype pids_files_legacy[] = { { .name = "max", .write = pids_max_write, .seq_show = pids_max_show, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "current", .read_s64 = pids_current_read, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "peak", .flags = CFTYPE_NOT_ON_ROOT, .read_s64 = pids_peak_read, }, { .name = "events", .seq_show = pids_events_show, .file_offset = offsetof(struct pids_cgroup, events_file), .flags = CFTYPE_NOT_ON_ROOT, }, { } /* terminate */ }; struct cgroup_subsys pids_cgrp_subsys = { .css_alloc = pids_css_alloc, .css_free = pids_css_free, .can_attach = pids_can_attach, .cancel_attach = pids_cancel_attach, .can_fork = pids_can_fork, .cancel_fork = pids_cancel_fork, .release = pids_release, .legacy_cftypes = pids_files_legacy, .dfl_cftypes = pids_files, .threaded = true, };
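
Per the controller's leading comment, a fork() that would push the hierarchy past a pids.max limit is rejected with -EAGAIN and accounted in pids.events. One way to observe this from userspace is to confine a process to a cgroup with a small pids.max and fork until the charge fails; the sketch below assumes a cgroup2 hierarchy mounted at /sys/fs/cgroup and a test cgroup prepared as in its opening comment, and is illustrative rather than a test-suite excerpt:

/* Illustrative userspace check of the pids.max behaviour described above.
 * Assumes the shell has already been moved into a limited cgroup, e.g.:
 *   mkdir /sys/fs/cgroup/pidtest
 *   echo 8 > /sys/fs/cgroup/pidtest/pids.max
 *   echo $$ > /sys/fs/cgroup/pidtest/cgroup.procs
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int forks = 0;

        for (;;) {
                pid_t pid = fork();

                if (pid < 0) {
                        /* pids_can_fork() refused the charge somewhere up
                         * the hierarchy; the failure surfaces as EAGAIN. */
                        printf("fork #%d failed: %s\n", forks + 1,
                               errno == EAGAIN ? "EAGAIN (pids limit)"
                                               : strerror(errno));
                        break;
                }
                if (pid == 0) {         /* child: just occupy a pid */
                        pause();
                        _exit(0);
                }
                forks++;
        }

        printf("%d children created before hitting the limit\n", forks);

        signal(SIGTERM, SIG_IGN);       /* don't take ourselves down below */
        kill(0, SIGTERM);               /* terminate the forked children */
        while (wait(NULL) > 0)
                ;
        return 0;
}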
14 11 11 1 1 1 1 1 1 1 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. */ #include "rxe.h" #define RXE_POOL_TIMEOUT (200) #define RXE_POOL_ALIGN (16) static const struct rxe_type_info { const char *name; size_t size; size_t elem_offset; void (*cleanup)(struct rxe_pool_elem *elem); u32 min_index; u32 max_index; u32 max_elem; } rxe_type_info[RXE_NUM_TYPES] = { [RXE_TYPE_UC] = { .name = "uc", .size = sizeof(struct rxe_ucontext), .elem_offset = offsetof(struct rxe_ucontext, elem), .min_index = 1, .max_index = RXE_MAX_UCONTEXT, .max_elem = RXE_MAX_UCONTEXT, }, [RXE_TYPE_PD] = { .name = "pd", .size = sizeof(struct rxe_pd), .elem_offset = offsetof(struct rxe_pd, elem), .min_index = 1, .max_index = RXE_MAX_PD, .max_elem = RXE_MAX_PD, }, [RXE_TYPE_AH] = { .name = "ah", .size = sizeof(struct rxe_ah), .elem_offset = offsetof(struct rxe_ah, elem), .min_index = RXE_MIN_AH_INDEX, .max_index = RXE_MAX_AH_INDEX, .max_elem = RXE_MAX_AH, }, [RXE_TYPE_SRQ] = { .name = "srq", .size = sizeof(struct rxe_srq), .elem_offset = offsetof(struct rxe_srq, elem), .cleanup = rxe_srq_cleanup, .min_index = RXE_MIN_SRQ_INDEX, .max_index = RXE_MAX_SRQ_INDEX, .max_elem = RXE_MAX_SRQ, }, [RXE_TYPE_QP] = { .name = "qp", .size = sizeof(struct rxe_qp), .elem_offset = offsetof(struct rxe_qp, elem), .cleanup = rxe_qp_cleanup, .min_index = RXE_MIN_QP_INDEX, .max_index = RXE_MAX_QP_INDEX, .max_elem = RXE_MAX_QP, }, [RXE_TYPE_CQ] = { .name = "cq", .size = sizeof(struct rxe_cq), .elem_offset = offsetof(struct rxe_cq, elem), .cleanup = rxe_cq_cleanup, .min_index = 1, .max_index = RXE_MAX_CQ, .max_elem = RXE_MAX_CQ, }, [RXE_TYPE_MR] = { .name = "mr", .size = sizeof(struct rxe_mr), .elem_offset = offsetof(struct rxe_mr, elem), .cleanup = rxe_mr_cleanup, .min_index = RXE_MIN_MR_INDEX, .max_index = RXE_MAX_MR_INDEX, .max_elem = RXE_MAX_MR, }, [RXE_TYPE_MW] = { .name = "mw", .size = sizeof(struct rxe_mw), .elem_offset = offsetof(struct rxe_mw, elem), .cleanup = rxe_mw_cleanup, .min_index = RXE_MIN_MW_INDEX, .max_index = RXE_MAX_MW_INDEX, .max_elem = RXE_MAX_MW, }, }; void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool, enum rxe_elem_type type) { const struct rxe_type_info *info = &rxe_type_info[type]; memset(pool, 0, sizeof(*pool)); pool->rxe = rxe; pool->name = info->name; pool->type = type; pool->max_elem = info->max_elem; pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN); pool->elem_offset = info->elem_offset; pool->cleanup = info->cleanup; atomic_set(&pool->num_elem, 0); 
xa_init_flags(&pool->xa, XA_FLAGS_ALLOC); pool->limit.min = info->min_index; pool->limit.max = info->max_index; } void rxe_pool_cleanup(struct rxe_pool *pool) { WARN_ON(!xa_empty(&pool->xa)); } int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem, bool sleepable) { int err = -EINVAL; gfp_t gfp_flags; if (atomic_inc_return(&pool->num_elem) > pool->max_elem) goto err_cnt; elem->pool = pool; elem->obj = (u8 *)elem - pool->elem_offset; kref_init(&elem->ref_cnt); init_completion(&elem->complete); /* AH objects are unique in that the create_ah verb * can be called in atomic context. If the create_ah * call is not sleepable use GFP_ATOMIC. */ gfp_flags = sleepable ? GFP_KERNEL : GFP_ATOMIC; if (sleepable) might_sleep(); err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit, &pool->next, gfp_flags); if (err < 0) goto err_cnt; return 0; err_cnt: atomic_dec(&pool->num_elem); return err; } void *rxe_pool_get_index(struct rxe_pool *pool, u32 index) { struct rxe_pool_elem *elem; struct xarray *xa = &pool->xa; void *obj; rcu_read_lock(); elem = xa_load(xa, index); if (elem && kref_get_unless_zero(&elem->ref_cnt)) obj = elem->obj; else obj = NULL; rcu_read_unlock(); return obj; } static void rxe_elem_release(struct kref *kref) { struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt); complete(&elem->complete); } int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable) { struct rxe_pool *pool = elem->pool; struct xarray *xa = &pool->xa; int ret, err = 0; void *xa_ret; if (sleepable) might_sleep(); /* erase xarray entry to prevent looking up * the pool elem from its index */ xa_ret = xa_erase(xa, elem->index); WARN_ON(xa_err(xa_ret)); /* if this is the last call to rxe_put complete the * object. It is safe to touch obj->elem after this since * it is freed below */ __rxe_put(elem); /* wait until all references to the object have been * dropped before final object specific cleanup and * return to rdma-core */ if (sleepable) { if (!completion_done(&elem->complete)) { ret = wait_for_completion_timeout(&elem->complete, msecs_to_jiffies(50000)); /* Shouldn't happen. There are still references to * the object but, rather than deadlock, free the * object or pass back to rdma-core. */ if (WARN_ON(!ret)) err = -ETIMEDOUT; } } else { unsigned long until = jiffies + RXE_POOL_TIMEOUT; /* AH objects are unique in that the destroy_ah verb * can be called in atomic context. This delay * replaces the wait_for_completion call above * when the destroy_ah call is not sleepable */ while (!completion_done(&elem->complete) && time_before(jiffies, until)) mdelay(1); if (WARN_ON(!completion_done(&elem->complete))) err = -ETIMEDOUT; } if (pool->cleanup) pool->cleanup(elem); atomic_dec(&pool->num_elem); return err; } int __rxe_get(struct rxe_pool_elem *elem) { return kref_get_unless_zero(&elem->ref_cnt); } int __rxe_put(struct rxe_pool_elem *elem) { return kref_put(&elem->ref_cnt, rxe_elem_release); } void __rxe_finalize(struct rxe_pool_elem *elem) { void *xa_ret; xa_ret = xa_store(&elem->pool->xa, elem->index, elem, GFP_KERNEL); WARN_ON(xa_err(xa_ret)); }
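
__rxe_cleanup() above follows a common teardown pattern: erase the index so no new lookup can take a reference, drop the owner's own reference, then wait on a completion that only fires when the last kref is put. The following userspace model reproduces that "unpublish, then wait for references to drain" sequence, with a mutex and condition variable standing in for kref and struct completion; names and structure are mine, for illustration only:

/* Standalone model of the reference-drain pattern used by __rxe_cleanup():
 * the owner drops its reference and waits until every outstanding user has
 * dropped theirs before the object may be freed.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct elem {
        int refs;                       /* plays the role of kref */
        pthread_mutex_t lock;
        pthread_cond_t drained;         /* plays the role of struct completion */
};

static void elem_get(struct elem *e)
{
        pthread_mutex_lock(&e->lock);
        e->refs++;
        pthread_mutex_unlock(&e->lock);
}

static void elem_put(struct elem *e)
{
        pthread_mutex_lock(&e->lock);
        if (--e->refs == 0)
                pthread_cond_signal(&e->drained);   /* complete(&elem->complete) */
        pthread_mutex_unlock(&e->lock);
}

static void *user_thread(void *arg)
{
        struct elem *e = arg;

        usleep(10000);          /* pretend to use the object for a while */
        elem_put(e);
        return NULL;
}

int main(void)
{
        struct elem e = { .refs = 1,    /* the owner's initial reference */
                          .lock = PTHREAD_MUTEX_INITIALIZER,
                          .drained = PTHREAD_COND_INITIALIZER };
        pthread_t users[3];

        for (int i = 0; i < 3; i++) {
                elem_get(&e);                   /* each lookup takes a reference */
                pthread_create(&users[i], NULL, user_thread, &e);
        }

        /* cleanup: drop our reference, then wait for the rest to drain */
        elem_put(&e);
        pthread_mutex_lock(&e.lock);
        while (e.refs > 0)
                pthread_cond_wait(&e.drained, &e.lock);
        pthread_mutex_unlock(&e.lock);

        for (int i = 0; i < 3; i++)
                pthread_join(users[i], NULL);

        printf("all references dropped, safe to free\n");
        return 0;
}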
17 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 /* SPDX-License-Identifier: GPL-2.0 */ /* * Generic RTC interface. * This version contains the part of the user interface to the Real Time Clock * service. It is used with both the legacy mc146818 and also EFI * Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out * from <linux/mc146818rtc.h> to this file for 2.4 kernels. * * Copyright (C) 1999 Hewlett-Packard Co. * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com> */ #ifndef _LINUX_RTC_H_ #define _LINUX_RTC_H_ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/nvmem-provider.h> #include <uapi/linux/rtc.h> extern int rtc_month_days(unsigned int month, unsigned int year); extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); extern int rtc_valid_tm(struct rtc_time *tm); extern time64_t rtc_tm_to_time64(struct rtc_time *tm); extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm); ktime_t rtc_tm_to_ktime(struct rtc_time tm); struct rtc_time rtc_ktime_to_tm(ktime_t kt); /* * rtc_tm_sub - Return the difference in seconds. */ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs) { return rtc_tm_to_time64(lhs) - rtc_tm_to_time64(rhs); } #include <linux/device.h> #include <linux/seq_file.h> #include <linux/cdev.h> #include <linux/poll.h> #include <linux/mutex.h> #include <linux/timerqueue.h> #include <linux/workqueue.h> extern const struct class rtc_class; /* * For these RTC methods the device parameter is the physical device * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which * was passed to rtc_device_register(). Its driver_data normally holds * device state, including the rtc_device pointer for the RTC. * * Most of these methods are called with rtc_device.ops_lock held, * through the rtc_*(struct rtc_device *, ...) calls. 
* * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs */ struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, unsigned long); int (*read_time)(struct device *, struct rtc_time *); int (*set_time)(struct device *, struct rtc_time *); int (*read_alarm)(struct device *, struct rtc_wkalrm *); int (*set_alarm)(struct device *, struct rtc_wkalrm *); int (*proc)(struct device *, struct seq_file *); int (*alarm_irq_enable)(struct device *, unsigned int enabled); int (*read_offset)(struct device *, long *offset); int (*set_offset)(struct device *, long offset); int (*param_get)(struct device *, struct rtc_param *param); int (*param_set)(struct device *, struct rtc_param *param); }; struct rtc_device; struct rtc_timer { struct timerqueue_node node; ktime_t period; void (*func)(struct rtc_device *rtc); struct rtc_device *rtc; int enabled; }; /* flags */ #define RTC_DEV_BUSY 0 #define RTC_NO_CDEV 1 struct rtc_device { struct device dev; struct module *owner; int id; const struct rtc_class_ops *ops; struct mutex ops_lock; struct cdev char_dev; unsigned long flags; unsigned long irq_data; spinlock_t irq_lock; wait_queue_head_t irq_queue; struct fasync_struct *async_queue; int irq_freq; int max_user_freq; struct timerqueue_head timerqueue; struct rtc_timer aie_timer; struct rtc_timer uie_rtctimer; struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ int pie_enabled; struct work_struct irqwork; /* * This offset specifies the update timing of the RTC. * * tsched t1 write(t2.tv_sec - 1sec)) t2 RTC increments seconds * * The offset defines how tsched is computed so that the write to * the RTC (t2.tv_sec - 1sec) is correct versus the time required * for the transport of the write and the time which the RTC needs * to increment seconds the first time after the write (t2). * * For direct accessible RTCs tsched ~= t1 because the write time * is negligible. For RTCs behind slow busses the transport time is * significant and has to be taken into account. * * The time between the write (t1) and the first increment after * the write (t2) is RTC specific. For a MC146818 RTC it's 500ms, * for many others it's exactly 1 second. Consult the datasheet. * * The value of this offset is also used to calculate the to be * written value (t2.tv_sec - 1sec) at tsched. * * The default value for this is NSEC_PER_SEC + 10 msec default * transport time. 
The offset can be adjusted by drivers so the * calculation for the to be written value at tsched becomes * correct: * * newval = tsched + set_offset_nsec - NSEC_PER_SEC * and (tsched + set_offset_nsec) % NSEC_PER_SEC == 0 */ unsigned long set_offset_nsec; unsigned long features[BITS_TO_LONGS(RTC_FEATURE_CNT)]; time64_t range_min; timeu64_t range_max; timeu64_t alarm_offset_max; time64_t start_secs; time64_t offset_secs; bool set_start_time; #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL struct work_struct uie_task; struct timer_list uie_timer; /* Those fields are protected by rtc->irq_lock */ unsigned int oldsecs; unsigned int uie_irq_active:1; unsigned int stop_uie_polling:1; unsigned int uie_task_active:1; unsigned int uie_timer_active:1; #endif }; #define to_rtc_device(d) container_of(d, struct rtc_device, dev) #define rtc_lock(d) mutex_lock(&d->ops_lock) #define rtc_unlock(d) mutex_unlock(&d->ops_lock) /* useful timestamps */ #define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */ #define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */ #define RTC_TIMESTAMP_EPOCH_GPS 315964800LL /* 1980-01-06 00:00:00 */ #define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ #define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_2199 7258118399LL /* 2199-12-31 23:59:59 */ #define RTC_TIMESTAMP_END_9999 253402300799LL /* 9999-12-31 23:59:59 */ extern struct rtc_device *devm_rtc_device_register(struct device *dev, const char *name, const struct rtc_class_ops *ops, struct module *owner); struct rtc_device *devm_rtc_allocate_device(struct device *dev); int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm); extern int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alrm); extern void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events); extern struct rtc_device *rtc_class_open(const char *name); extern void rtc_class_close(struct rtc_device *rtc); extern int rtc_irq_set_state(struct rtc_device *rtc, int enabled); extern int rtc_irq_set_freq(struct rtc_device *rtc, int freq); extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled); void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); void rtc_aie_update_irq(struct rtc_device *rtc); void rtc_uie_update_irq(struct rtc_device *rtc); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r), struct rtc_device *rtc); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period); void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); int rtc_read_offset(struct rtc_device *rtc, long *offset); int rtc_set_offset(struct rtc_device *rtc, long offset); void rtc_timer_do_work(struct 
work_struct *work); static inline bool is_leap_year(unsigned int year) { return (!(year % 4) && (year % 100)) || !(year % 400); } /** * rtc_bound_alarmtime() - Return alarm time bound by rtc limit * @rtc: Pointer to rtc device structure * @requested: Requested alarm timeout * * Return: Alarm timeout bound by maximum alarm time supported by rtc. */ static inline ktime_t rtc_bound_alarmtime(struct rtc_device *rtc, ktime_t requested) { if (rtc->alarm_offset_max && rtc->alarm_offset_max * MSEC_PER_SEC < ktime_to_ms(requested)) return ms_to_ktime(rtc->alarm_offset_max * MSEC_PER_SEC); return requested; } #define devm_rtc_register_device(device) \ __devm_rtc_register_device(THIS_MODULE, device) #ifdef CONFIG_RTC_HCTOSYS_DEVICE extern int rtc_hctosys_ret; #else #define rtc_hctosys_ret -ENODEV #endif #ifdef CONFIG_RTC_NVMEM int devm_rtc_nvmem_register(struct rtc_device *rtc, struct nvmem_config *nvmem_config); #else static inline int devm_rtc_nvmem_register(struct rtc_device *rtc, struct nvmem_config *nvmem_config) { return 0; } #endif #ifdef CONFIG_RTC_INTF_SYSFS int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp); int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps); #else static inline int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp) { return 0; } static inline int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps) { return 0; } #endif #endif /* _LINUX_RTC_H_ */
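
The set_offset_nsec comment earlier in this header boils down to simple arithmetic: pick the next second boundary t2 that is at least set_offset_nsec in the future, schedule the write at tsched = t2 - set_offset_nsec so that (tsched + set_offset_nsec) % NSEC_PER_SEC == 0, and write t2 - 1 seconds so the hardware increments to t2 exactly on the boundary. The snippet below just evaluates that calculation for an example offset; it is an illustration of the comment, not kernel code:

/* Evaluate the scheduling arithmetic described in the set_offset_nsec
 * comment: the write is issued at tsched, and the value written is one
 * second less than the boundary the RTC will increment to.
 */
#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
        /* Example: "now" is 12.3 s into the epoch; the offset is the default
         * 1 s + 10 ms transport allowance mentioned in the comment. */
        int64_t now_ns = 12 * NSEC_PER_SEC + 300000000LL;
        int64_t set_offset_nsec = NSEC_PER_SEC + 10000000LL;

        /* First second boundary t2 that is at least set_offset_nsec away. */
        int64_t t2_sec = (now_ns + set_offset_nsec + NSEC_PER_SEC - 1) / NSEC_PER_SEC;
        int64_t tsched = t2_sec * NSEC_PER_SEC - set_offset_nsec;
        int64_t written_sec = t2_sec - 1;       /* "t2.tv_sec - 1sec" */

        printf("now           = %" PRId64 " ns\n", now_ns);
        printf("tsched        = %" PRId64 " ns\n", tsched);
        printf("value written = %" PRId64 " s\n", written_sec);
        printf("RTC reaches %" PRId64 " s exactly at %" PRId64 " ns\n",
               t2_sec, t2_sec * NSEC_PER_SEC);
        return 0;
}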
10218 10259 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NET_FLOW_DISSECTOR_H #define _NET_FLOW_DISSECTOR_H #include <linux/types.h> #include <linux/in6.h> #include <linux/siphash.h> #include <linux/string.h> #include <uapi/linux/if_ether.h> #include <uapi/linux/pkt_cls.h> struct bpf_prog; struct net; struct sk_buff; /** * struct flow_dissector_key_control: * @thoff: Transport header offset * @addr_type: Type of key. One of FLOW_DISSECTOR_KEY_* * @flags: Key flags. * Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAG|ENCAPSULATION|F_*) */ struct flow_dissector_key_control { u16 thoff; u16 addr_type; u32 flags; }; /* The control flags are kept in sync with TCA_FLOWER_KEY_FLAGS_*, as those * flags are exposed to userspace in some error paths, ie. unsupported flags. */ enum flow_dissector_ctrl_flags { FLOW_DIS_IS_FRAGMENT = TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_FIRST_FRAG = TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, FLOW_DIS_F_TUNNEL_CSUM = TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM, FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT, FLOW_DIS_F_TUNNEL_OAM = TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_CRIT_OPT = TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT, /* These flags are internal to the kernel */ FLOW_DIS_ENCAPSULATION = (TCA_FLOWER_KEY_FLAGS_MAX << 1), }; enum flow_dissect_ret { FLOW_DISSECT_RET_OUT_GOOD, FLOW_DISSECT_RET_OUT_BAD, FLOW_DISSECT_RET_PROTO_AGAIN, FLOW_DISSECT_RET_IPPROTO_AGAIN, FLOW_DISSECT_RET_CONTINUE, }; /** * struct flow_dissector_key_basic: * @n_proto: Network header protocol (eg. IPv4/IPv6) * @ip_proto: Transport header protocol (eg. 
TCP/UDP) * @padding: Unused */ struct flow_dissector_key_basic { __be16 n_proto; u8 ip_proto; u8 padding; }; struct flow_dissector_key_tags { u32 flow_label; }; struct flow_dissector_key_vlan { union { struct { u16 vlan_id:12, vlan_dei:1, vlan_priority:3; }; __be16 vlan_tci; }; __be16 vlan_tpid; __be16 vlan_eth_type; u16 padding; }; struct flow_dissector_mpls_lse { u32 mpls_ttl:8, mpls_bos:1, mpls_tc:3, mpls_label:20; }; #define FLOW_DIS_MPLS_MAX 7 struct flow_dissector_key_mpls { struct flow_dissector_mpls_lse ls[FLOW_DIS_MPLS_MAX]; /* Label Stack */ u8 used_lses; /* One bit set for each Label Stack Entry in use */ }; static inline void dissector_set_mpls_lse(struct flow_dissector_key_mpls *mpls, int lse_index) { mpls->used_lses |= 1 << lse_index; } #define FLOW_DIS_TUN_OPTS_MAX 255 /** * struct flow_dissector_key_enc_opts: * @data: tunnel option data * @len: length of tunnel option data * @dst_opt_type: tunnel option type */ struct flow_dissector_key_enc_opts { u8 data[FLOW_DIS_TUN_OPTS_MAX]; /* Using IP_TUNNEL_OPTS_MAX is desired * here but seems difficult to #include */ u8 len; u32 dst_opt_type; }; struct flow_dissector_key_keyid { __be32 keyid; }; /** * struct flow_dissector_key_ipv4_addrs: * @src: source ip address * @dst: destination ip address */ struct flow_dissector_key_ipv4_addrs { /* (src,dst) must be grouped, in the same way than in IP header */ __be32 src; __be32 dst; }; /** * struct flow_dissector_key_ipv6_addrs: * @src: source ip address * @dst: destination ip address */ struct flow_dissector_key_ipv6_addrs { /* (src,dst) must be grouped, in the same way than in IP header */ struct in6_addr src; struct in6_addr dst; }; /** * struct flow_dissector_key_tipc: * @key: source node address combined with selector */ struct flow_dissector_key_tipc { __be32 key; }; /** * struct flow_dissector_key_addrs: * @v4addrs: IPv4 addresses * @v6addrs: IPv6 addresses * @tipckey: TIPC key */ struct flow_dissector_key_addrs { union { struct flow_dissector_key_ipv4_addrs v4addrs; struct flow_dissector_key_ipv6_addrs v6addrs; struct flow_dissector_key_tipc tipckey; }; }; /** * struct flow_dissector_key_arp: * @sip: Sender IP address * @tip: Target IP address * @op: Operation * @sha: Sender hardware address * @tha: Target hardware address */ struct flow_dissector_key_arp { __u32 sip; __u32 tip; __u8 op; unsigned char sha[ETH_ALEN]; unsigned char tha[ETH_ALEN]; }; /** * struct flow_dissector_key_ports: * @ports: port numbers of Transport header * @src: source port number * @dst: destination port number */ struct flow_dissector_key_ports { union { __be32 ports; struct { __be16 src; __be16 dst; }; }; }; /** * struct flow_dissector_key_ports_range * @tp: port number from packet * @tp_min: min port number in range * @tp_max: max port number in range */ struct flow_dissector_key_ports_range { union { struct flow_dissector_key_ports tp; struct { struct flow_dissector_key_ports tp_min; struct flow_dissector_key_ports tp_max; }; }; }; /** * struct flow_dissector_key_icmp: * @type: ICMP type * @code: ICMP code * @id: Session identifier */ struct flow_dissector_key_icmp { struct { u8 type; u8 code; }; u16 id; }; /** * struct flow_dissector_key_eth_addrs: * @src: source Ethernet address * @dst: destination Ethernet address */ struct flow_dissector_key_eth_addrs { /* (dst,src) must be grouped, in the same way than in ETH header */ unsigned char dst[ETH_ALEN]; unsigned char src[ETH_ALEN]; }; /** * struct flow_dissector_key_tcp: * @flags: flags */ struct flow_dissector_key_tcp { __be16 flags; }; /** * struct 
flow_dissector_key_ip: * @tos: tos * @ttl: ttl */ struct flow_dissector_key_ip { __u8 tos; __u8 ttl; }; /** * struct flow_dissector_key_meta: * @ingress_ifindex: ingress ifindex * @ingress_iftype: ingress interface type * @l2_miss: packet did not match an L2 entry during forwarding */ struct flow_dissector_key_meta { int ingress_ifindex; u16 ingress_iftype; u8 l2_miss; }; /** * struct flow_dissector_key_ct: * @ct_state: conntrack state after converting with map * @ct_mark: conttrack mark * @ct_zone: conntrack zone * @ct_labels: conntrack labels */ struct flow_dissector_key_ct { u16 ct_state; u16 ct_zone; u32 ct_mark; u32 ct_labels[4]; }; /** * struct flow_dissector_key_hash: * @hash: hash value */ struct flow_dissector_key_hash { u32 hash; }; /** * struct flow_dissector_key_num_of_vlans: * @num_of_vlans: num_of_vlans value */ struct flow_dissector_key_num_of_vlans { u8 num_of_vlans; }; /** * struct flow_dissector_key_pppoe: * @session_id: pppoe session id * @ppp_proto: ppp protocol * @type: pppoe eth type */ struct flow_dissector_key_pppoe { __be16 session_id; __be16 ppp_proto; __be16 type; }; /** * struct flow_dissector_key_l2tpv3: * @session_id: identifier for a l2tp session */ struct flow_dissector_key_l2tpv3 { __be32 session_id; }; /** * struct flow_dissector_key_ipsec: * @spi: identifier for a ipsec connection */ struct flow_dissector_key_ipsec { __be32 spi; }; /** * struct flow_dissector_key_cfm * @mdl_ver: maintenance domain level (mdl) and cfm protocol version * @opcode: code specifying a type of cfm protocol packet * * See 802.1ag, ITU-T G.8013/Y.1731 * 1 2 * |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | mdl | version | opcode | * +-----+---------+-+-+-+-+-+-+-+-+ */ struct flow_dissector_key_cfm { u8 mdl_ver; u8 opcode; }; #define FLOW_DIS_CFM_MDL_MASK GENMASK(7, 5) #define FLOW_DIS_CFM_MDL_MAX 7 enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */ FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_PORTS_RANGE, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */ FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_vlan */ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_ENC_KEYID, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct 
flow_dissector_key_enc_opts */ FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */ FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */ FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */ FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */ FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */ FLOW_DISSECTOR_KEY_L2TPV3, /* struct flow_dissector_key_l2tpv3 */ FLOW_DISSECTOR_KEY_CFM, /* struct flow_dissector_key_cfm */ FLOW_DISSECTOR_KEY_IPSEC, /* struct flow_dissector_key_ipsec */ FLOW_DISSECTOR_KEY_MAX, }; #define FLOW_DISSECTOR_F_PARSE_1ST_FRAG BIT(0) #define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL BIT(1) #define FLOW_DISSECTOR_F_STOP_AT_ENCAP BIT(2) #define FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP BIT(3) struct flow_dissector_key { enum flow_dissector_key_id key_id; size_t offset; /* offset of struct flow_dissector_key_* in target the struct */ }; struct flow_dissector { unsigned long long used_keys; /* each bit represents presence of one key id */ unsigned short int offset[FLOW_DISSECTOR_KEY_MAX]; }; struct flow_keys_basic { struct flow_dissector_key_control control; struct flow_dissector_key_basic basic; }; struct flow_keys { struct flow_dissector_key_control control; #define FLOW_KEYS_HASH_START_FIELD basic struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT); struct flow_dissector_key_tags tags; struct flow_dissector_key_vlan vlan; struct flow_dissector_key_vlan cvlan; struct flow_dissector_key_keyid keyid; struct flow_dissector_key_ports ports; struct flow_dissector_key_icmp icmp; /* 'addrs' must be the last member */ struct flow_dissector_key_addrs addrs; }; #define FLOW_KEYS_HASH_OFFSET \ offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD) __be32 flow_get_u32_src(const struct flow_keys *flow); __be32 flow_get_u32_dst(const struct flow_keys *flow); extern struct flow_dissector flow_keys_dissector; extern struct flow_dissector flow_keys_basic_dissector; /* struct flow_keys_digest: * * This structure is used to hold a digest of the full flow keys. This is a * larger "hash" of a flow to allow definitively matching specific flows where * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so * that it can be used in CB of skb (see sch_choke for an example). 
*/ #define FLOW_KEYS_DIGEST_LEN 16 struct flow_keys_digest { u8 data[FLOW_KEYS_DIGEST_LEN]; }; void make_flow_keys_digest(struct flow_keys_digest *digest, const struct flow_keys *flow); static inline bool flow_keys_have_l4(const struct flow_keys *keys) { return (keys->ports.ports || keys->tags.flow_label); } u32 flow_hash_from_keys(struct flow_keys *keys); u32 flow_hash_from_keys_seed(struct flow_keys *keys, const siphash_key_t *keyval); void skb_flow_get_icmp_tci(const struct sk_buff *skb, struct flow_dissector_key_icmp *key_icmp, const void *data, int thoff, int hlen); static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector, enum flow_dissector_key_id key_id) { return flow_dissector->used_keys & (1ULL << key_id); } static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissector, enum flow_dissector_key_id key_id, void *target_container) { return ((char *)target_container) + flow_dissector->offset[key_id]; } struct bpf_flow_dissector { struct bpf_flow_keys *flow_keys; const struct sk_buff *skb; const void *data; const void *data_end; }; static inline void flow_dissector_init_keys(struct flow_dissector_key_control *key_control, struct flow_dissector_key_basic *key_basic) { memset(key_control, 0, sizeof(*key_control)); memset(key_basic, 0, sizeof(*key_basic)); } #ifdef CONFIG_BPF_SYSCALL int flow_dissector_bpf_prog_attach_check(struct net *net, struct bpf_prog *prog); #endif /* CONFIG_BPF_SYSCALL */ #endif
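The sketch below is not part of flow_dissector.h; it only illustrates how the helpers declared above are normally consumed: a classifier stores its keys in its own container struct, and pairs dissector_uses_key() with skb_flow_dissector_target() (which indexes the offset[] table) to locate one key inside that container. The names example_flow_key and example_get_ipv4_addrs are hypothetical.

/*
 * Illustrative sketch, assuming the container below was registered with the
 * dissector so that offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] points at @ipv4.
 */
struct example_flow_key {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_ipv4_addrs ipv4;
};

static struct flow_dissector_key_ipv4_addrs *
example_get_ipv4_addrs(struct flow_dissector *d, struct example_flow_key *key)
{
	/* Only dereference the key if the dissector was built with it. */
	if (!dissector_uses_key(d, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return NULL;

	/* offset[] tells us where the IPv4 addresses live in @key. */
	return skb_flow_dissector_target(d, FLOW_DISSECTOR_KEY_IPV4_ADDRS, key);
}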
// SPDX-License-Identifier: GPL-2.0 /* Multipath TCP * * Copyright (c) 2021, Red Hat. */ #define pr_fmt(fmt) "MPTCP: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <net/sock.h> #include <net/protocol.h> #include <net/tcp.h> #include <net/mptcp.h> #include "protocol.h" #define MIN_INFO_OPTLEN_SIZE 16 #define MIN_FULL_INFO_OPTLEN_SIZE 40 static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk) { msk_owned_by_me(msk); if (likely(!__mptcp_check_fallback(msk))) return NULL; return msk->first; } static u32 sockopt_seq_reset(const struct sock *sk) { sock_owned_by_me(sk); /* Highbits contain state. Allows to distinguish sockopt_seq * of listener and established: * s0 = new_listener() * sockopt(s0) - seq is 1 * s1 = accept(s0) - s1 inherits seq 1 if listener sk (s0) * sockopt(s0) - seq increments to 2 on s0 * sockopt(s1) // seq increments to 2 on s1 (different option) * new ssk completes join, inherits options from s0 // seq 2 * Needs sync from mptcp join logic, but ssk->seq == msk->seq * * Set High order bits to sk_state so ssk->seq == msk->seq test * will fail.
*/ return (u32)sk->sk_state << 24u; } static void sockopt_seq_inc(struct mptcp_sock *msk) { u32 seq = (msk->setsockopt_seq + 1) & 0x00ffffff; msk->setsockopt_seq = sockopt_seq_reset((struct sock *)msk) + seq; } static int mptcp_get_int_option(struct mptcp_sock *msk, sockptr_t optval, unsigned int optlen, int *val) { if (optlen < sizeof(int)) return -EINVAL; if (copy_from_sockptr(val, optval, sizeof(*val))) return -EFAULT; return 0; } static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname, int val) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; lock_sock(sk); sockopt_seq_inc(msk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow = lock_sock_fast(ssk); switch (optname) { case SO_DEBUG: sock_valbool_flag(ssk, SOCK_DBG, !!val); break; case SO_KEEPALIVE: if (ssk->sk_prot->keepalive) ssk->sk_prot->keepalive(ssk, !!val); sock_valbool_flag(ssk, SOCK_KEEPOPEN, !!val); break; case SO_PRIORITY: WRITE_ONCE(ssk->sk_priority, val); break; case SO_SNDBUF: case SO_SNDBUFFORCE: ssk->sk_userlocks |= SOCK_SNDBUF_LOCK; WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf); mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf; break; case SO_RCVBUF: case SO_RCVBUFFORCE: ssk->sk_userlocks |= SOCK_RCVBUF_LOCK; WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf); break; case SO_MARK: if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) { WRITE_ONCE(ssk->sk_mark, sk->sk_mark); sk_dst_reset(ssk); } break; case SO_INCOMING_CPU: WRITE_ONCE(ssk->sk_incoming_cpu, val); break; } subflow->setsockopt_seq = msk->setsockopt_seq; unlock_sock_fast(ssk, slow); } release_sock(sk); } static int mptcp_sol_socket_intval(struct mptcp_sock *msk, int optname, int val) { sockptr_t optval = KERNEL_SOCKPTR(&val); struct sock *sk = (struct sock *)msk; int ret; ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, sizeof(val)); if (ret) return ret; mptcp_sol_socket_sync_intval(msk, optname, val); return 0; } static void mptcp_so_incoming_cpu(struct mptcp_sock *msk, int val) { struct sock *sk = (struct sock *)msk; WRITE_ONCE(sk->sk_incoming_cpu, val); mptcp_sol_socket_sync_intval(msk, SO_INCOMING_CPU, val); } static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optname, int val) { sockptr_t optval = KERNEL_SOCKPTR(&val); struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; int ret; ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, sizeof(val)); if (ret) return ret; lock_sock(sk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow = lock_sock_fast(ssk); sock_set_timestamp(sk, optname, !!val); unlock_sock_fast(ssk, slow); } release_sock(sk); return 0; } static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { int val, ret; ret = mptcp_get_int_option(msk, optval, optlen, &val); if (ret) return ret; switch (optname) { case SO_KEEPALIVE: case SO_DEBUG: case SO_MARK: case SO_PRIORITY: case SO_SNDBUF: case SO_SNDBUFFORCE: case SO_RCVBUF: case SO_RCVBUFFORCE: return mptcp_sol_socket_intval(msk, optname, val); case SO_INCOMING_CPU: mptcp_so_incoming_cpu(msk, val); return 0; case SO_TIMESTAMP_OLD: case SO_TIMESTAMP_NEW: case SO_TIMESTAMPNS_OLD: case SO_TIMESTAMPNS_NEW: return mptcp_setsockopt_sol_socket_tstamp(msk, optname, val); } return -ENOPROTOOPT; } static int mptcp_setsockopt_sol_socket_timestamping(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { struct 
mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; struct so_timestamping timestamping; int ret; if (optlen == sizeof(timestamping)) { if (copy_from_sockptr(&timestamping, optval, sizeof(timestamping))) return -EFAULT; } else if (optlen == sizeof(int)) { memset(&timestamping, 0, sizeof(timestamping)); if (copy_from_sockptr(&timestamping.flags, optval, sizeof(int))) return -EFAULT; } else { return -EINVAL; } ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, KERNEL_SOCKPTR(&timestamping), sizeof(timestamping)); if (ret) return ret; lock_sock(sk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow = lock_sock_fast(ssk); sock_set_timestamping(sk, optname, timestamping); unlock_sock_fast(ssk, slow); } release_sock(sk); return 0; } static int mptcp_setsockopt_sol_socket_linger(struct mptcp_sock *msk, sockptr_t optval, unsigned int optlen) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; struct linger ling; sockptr_t kopt; int ret; if (optlen < sizeof(ling)) return -EINVAL; if (copy_from_sockptr(&ling, optval, sizeof(ling))) return -EFAULT; kopt = KERNEL_SOCKPTR(&ling); ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, SO_LINGER, kopt, sizeof(ling)); if (ret) return ret; lock_sock(sk); sockopt_seq_inc(msk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow = lock_sock_fast(ssk); if (!ling.l_onoff) { sock_reset_flag(ssk, SOCK_LINGER); } else { ssk->sk_lingertime = sk->sk_lingertime; sock_set_flag(ssk, SOCK_LINGER); } subflow->setsockopt_seq = msk->setsockopt_seq; unlock_sock_fast(ssk, slow); } release_sock(sk); return 0; } static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = (struct sock *)msk; struct sock *ssk; int ret; switch (optname) { case SO_REUSEPORT: case SO_REUSEADDR: case SO_BINDTODEVICE: case SO_BINDTOIFINDEX: lock_sock(sk); ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) { release_sock(sk); return PTR_ERR(ssk); } ret = sk_setsockopt(ssk, SOL_SOCKET, optname, optval, optlen); if (ret == 0) { if (optname == SO_REUSEPORT) sk->sk_reuseport = ssk->sk_reuseport; else if (optname == SO_REUSEADDR) sk->sk_reuse = ssk->sk_reuse; else if (optname == SO_BINDTODEVICE) sk->sk_bound_dev_if = ssk->sk_bound_dev_if; else if (optname == SO_BINDTOIFINDEX) sk->sk_bound_dev_if = ssk->sk_bound_dev_if; } release_sock(sk); return ret; case SO_KEEPALIVE: case SO_PRIORITY: case SO_SNDBUF: case SO_SNDBUFFORCE: case SO_RCVBUF: case SO_RCVBUFFORCE: case SO_MARK: case SO_INCOMING_CPU: case SO_DEBUG: case SO_TIMESTAMP_OLD: case SO_TIMESTAMP_NEW: case SO_TIMESTAMPNS_OLD: case SO_TIMESTAMPNS_NEW: return mptcp_setsockopt_sol_socket_int(msk, optname, optval, optlen); case SO_TIMESTAMPING_OLD: case SO_TIMESTAMPING_NEW: return mptcp_setsockopt_sol_socket_timestamping(msk, optname, optval, optlen); case SO_LINGER: return mptcp_setsockopt_sol_socket_linger(msk, optval, optlen); case SO_RCVLOWAT: case SO_RCVTIMEO_OLD: case SO_RCVTIMEO_NEW: case SO_SNDTIMEO_OLD: case SO_SNDTIMEO_NEW: case SO_BUSY_POLL: case SO_PREFER_BUSY_POLL: case SO_BUSY_POLL_BUDGET: /* No need to copy: only relevant for msk */ return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); case SO_NO_CHECK: case SO_DONTROUTE: case SO_BROADCAST: case SO_BSDCOMPAT: case SO_PASSCRED: case SO_PASSPIDFD: case SO_PASSSEC: case SO_RXQ_OVFL: case SO_WIFI_STATUS: case SO_NOFCS: case SO_SELECT_ERR_QUEUE: return 0; } /* 
SO_OOBINLINE is not supported, let's avoid the related mess * SO_ATTACH_FILTER, SO_ATTACH_BPF, SO_ATTACH_REUSEPORT_CBPF, * SO_DETACH_REUSEPORT_BPF, SO_DETACH_FILTER, SO_LOCK_FILTER, * we must be careful with subflows * * SO_ATTACH_REUSEPORT_EBPF is not supported, at it checks * explicitly the sk_protocol field * * SO_PEEK_OFF is unsupported, as it is for plain TCP * SO_MAX_PACING_RATE is unsupported, we must be careful with subflows * SO_CNX_ADVICE is currently unsupported, could possibly be relevant, * but likely needs careful design * * SO_ZEROCOPY is currently unsupported, TODO in sndmsg * SO_TXTIME is currently unsupported */ return -EOPNOTSUPP; } static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = (struct sock *)msk; int ret = -EOPNOTSUPP; struct sock *ssk; switch (optname) { case IPV6_V6ONLY: case IPV6_TRANSPARENT: case IPV6_FREEBIND: lock_sock(sk); ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) { release_sock(sk); return PTR_ERR(ssk); } ret = tcp_setsockopt(ssk, SOL_IPV6, optname, optval, optlen); if (ret != 0) { release_sock(sk); return ret; } sockopt_seq_inc(msk); switch (optname) { case IPV6_V6ONLY: sk->sk_ipv6only = ssk->sk_ipv6only; break; case IPV6_TRANSPARENT: inet_assign_bit(TRANSPARENT, sk, inet_test_bit(TRANSPARENT, ssk)); break; case IPV6_FREEBIND: inet_assign_bit(FREEBIND, sk, inet_test_bit(FREEBIND, ssk)); break; } release_sock(sk); break; } return ret; } static bool mptcp_supported_sockopt(int level, int optname) { if (level == SOL_IP) { switch (optname) { /* should work fine */ case IP_FREEBIND: case IP_TRANSPARENT: case IP_BIND_ADDRESS_NO_PORT: case IP_LOCAL_PORT_RANGE: /* the following are control cmsg related */ case IP_PKTINFO: case IP_RECVTTL: case IP_RECVTOS: case IP_RECVOPTS: case IP_RETOPTS: case IP_PASSSEC: case IP_RECVORIGDSTADDR: case IP_CHECKSUM: case IP_RECVFRAGSIZE: /* common stuff that need some love */ case IP_TOS: case IP_TTL: case IP_MTU_DISCOVER: case IP_RECVERR: /* possibly less common may deserve some love */ case IP_MINTTL: /* the following is apparently a no-op for plain TCP */ case IP_RECVERR_RFC4884: return true; } /* IP_OPTIONS is not supported, needs subflow care */ /* IP_HDRINCL, IP_NODEFRAG are not supported, RAW specific */ /* IP_MULTICAST_TTL, IP_MULTICAST_LOOP, IP_UNICAST_IF, * IP_ADD_MEMBERSHIP, IP_ADD_SOURCE_MEMBERSHIP, IP_DROP_MEMBERSHIP, * IP_DROP_SOURCE_MEMBERSHIP, IP_BLOCK_SOURCE, IP_UNBLOCK_SOURCE, * MCAST_JOIN_GROUP, MCAST_LEAVE_GROUP MCAST_JOIN_SOURCE_GROUP, * MCAST_LEAVE_SOURCE_GROUP, MCAST_BLOCK_SOURCE, MCAST_UNBLOCK_SOURCE, * MCAST_MSFILTER, IP_MULTICAST_ALL are not supported, better not deal * with mcast stuff */ /* IP_IPSEC_POLICY, IP_XFRM_POLICY are nut supported, unrelated here */ return false; } if (level == SOL_IPV6) { switch (optname) { case IPV6_V6ONLY: /* the following are control cmsg related */ case IPV6_RECVPKTINFO: case IPV6_2292PKTINFO: case IPV6_RECVHOPLIMIT: case IPV6_2292HOPLIMIT: case IPV6_RECVRTHDR: case IPV6_2292RTHDR: case IPV6_RECVHOPOPTS: case IPV6_2292HOPOPTS: case IPV6_RECVDSTOPTS: case IPV6_2292DSTOPTS: case IPV6_RECVTCLASS: case IPV6_FLOWINFO: case IPV6_RECVPATHMTU: case IPV6_RECVORIGDSTADDR: case IPV6_RECVFRAGSIZE: /* the following ones need some love but are quite common */ case IPV6_TCLASS: case IPV6_TRANSPARENT: case IPV6_FREEBIND: case IPV6_PKTINFO: case IPV6_2292PKTOPTIONS: case IPV6_UNICAST_HOPS: case IPV6_MTU_DISCOVER: case IPV6_MTU: case IPV6_RECVERR: case IPV6_FLOWINFO_SEND: case IPV6_FLOWLABEL_MGR: case 
IPV6_MINHOPCOUNT: case IPV6_DONTFRAG: case IPV6_AUTOFLOWLABEL: /* the following one is a no-op for plain TCP */ case IPV6_RECVERR_RFC4884: return true; } /* IPV6_HOPOPTS, IPV6_RTHDRDSTOPTS, IPV6_RTHDR, IPV6_DSTOPTS are * not supported */ /* IPV6_MULTICAST_HOPS, IPV6_MULTICAST_LOOP, IPV6_UNICAST_IF, * IPV6_MULTICAST_IF, IPV6_ADDRFORM, * IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP, IPV6_JOIN_ANYCAST, * IPV6_LEAVE_ANYCAST, IPV6_MULTICAST_ALL, MCAST_JOIN_GROUP, MCAST_LEAVE_GROUP, * MCAST_JOIN_SOURCE_GROUP, MCAST_LEAVE_SOURCE_GROUP, * MCAST_BLOCK_SOURCE, MCAST_UNBLOCK_SOURCE, MCAST_MSFILTER * are not supported better not deal with mcast */ /* IPV6_ROUTER_ALERT, IPV6_ROUTER_ALERT_ISOLATE are not supported, since are evil */ /* IPV6_IPSEC_POLICY, IPV6_XFRM_POLICY are not supported */ /* IPV6_ADDR_PREFERENCES is not supported, we must be careful with subflows */ return false; } if (level == SOL_TCP) { switch (optname) { /* the following are no-op or should work just fine */ case TCP_THIN_DUPACK: case TCP_DEFER_ACCEPT: /* the following need some love */ case TCP_MAXSEG: case TCP_NODELAY: case TCP_THIN_LINEAR_TIMEOUTS: case TCP_CONGESTION: case TCP_CORK: case TCP_KEEPIDLE: case TCP_KEEPINTVL: case TCP_KEEPCNT: case TCP_SYNCNT: case TCP_SAVE_SYN: case TCP_LINGER2: case TCP_WINDOW_CLAMP: case TCP_QUICKACK: case TCP_USER_TIMEOUT: case TCP_TIMESTAMP: case TCP_NOTSENT_LOWAT: case TCP_TX_DELAY: case TCP_INQ: case TCP_FASTOPEN: case TCP_FASTOPEN_CONNECT: case TCP_FASTOPEN_KEY: case TCP_FASTOPEN_NO_COOKIE: return true; } /* TCP_MD5SIG, TCP_MD5SIG_EXT are not supported, MD5 is not compatible with MPTCP */ /* TCP_REPAIR, TCP_REPAIR_QUEUE, TCP_QUEUE_SEQ, TCP_REPAIR_OPTIONS, * TCP_REPAIR_WINDOW are not supported, better avoid this mess */ } return false; } static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t optval, unsigned int optlen) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; char name[TCP_CA_NAME_MAX]; bool cap_net_admin; int ret; if (optlen < 1) return -EINVAL; ret = strncpy_from_sockptr(name, optval, min_t(long, TCP_CA_NAME_MAX - 1, optlen)); if (ret < 0) return -EFAULT; name[ret] = 0; cap_net_admin = ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN); ret = 0; lock_sock(sk); sockopt_seq_inc(msk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); int err; lock_sock(ssk); err = tcp_set_congestion_control(ssk, name, true, cap_net_admin); if (err < 0 && ret == 0) ret = err; subflow->setsockopt_seq = msk->setsockopt_seq; release_sock(ssk); } if (ret == 0) strscpy(msk->ca_name, name, sizeof(msk->ca_name)); release_sock(sk); return ret; } static int __mptcp_setsockopt_set_val(struct mptcp_sock *msk, int max, int (*set_val)(struct sock *, int), int *msk_val, int val) { struct mptcp_subflow_context *subflow; int err = 0; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); int ret; lock_sock(ssk); ret = set_val(ssk, val); err = err ? 
: ret; release_sock(ssk); } if (!err) { *msk_val = val; sockopt_seq_inc(msk); } return err; } static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; sockopt_seq_inc(msk); msk->cork = !!val; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); lock_sock(ssk); __tcp_sock_set_cork(ssk, !!val); release_sock(ssk); } if (!val) mptcp_check_and_set_pending(sk); return 0; } static int __mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, int val) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; sockopt_seq_inc(msk); msk->nodelay = !!val; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); lock_sock(ssk); __tcp_sock_set_nodelay(ssk, !!val); release_sock(ssk); } if (val) mptcp_check_and_set_pending(sk); return 0; } static int mptcp_setsockopt_sol_ip_set(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = (struct sock *)msk; struct sock *ssk; int err; err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen); if (err != 0) return err; lock_sock(sk); ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) { release_sock(sk); return PTR_ERR(ssk); } switch (optname) { case IP_FREEBIND: inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk)); break; case IP_TRANSPARENT: inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk)); break; case IP_BIND_ADDRESS_NO_PORT: inet_assign_bit(BIND_ADDRESS_NO_PORT, ssk, inet_test_bit(BIND_ADDRESS_NO_PORT, sk)); break; case IP_LOCAL_PORT_RANGE: WRITE_ONCE(inet_sk(ssk)->local_port_range, READ_ONCE(inet_sk(sk)->local_port_range)); break; default: release_sock(sk); WARN_ON_ONCE(1); return -EOPNOTSUPP; } sockopt_seq_inc(msk); release_sock(sk); return 0; } static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; int err, val; err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen); if (err != 0) return err; lock_sock(sk); sockopt_seq_inc(msk); val = READ_ONCE(inet_sk(sk)->tos); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow; slow = lock_sock_fast(ssk); __ip_sock_set_tos(ssk, val); unlock_sock_fast(ssk, slow); } release_sock(sk); return 0; } static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { switch (optname) { case IP_FREEBIND: case IP_TRANSPARENT: case IP_BIND_ADDRESS_NO_PORT: case IP_LOCAL_PORT_RANGE: return mptcp_setsockopt_sol_ip_set(msk, optname, optval, optlen); case IP_TOS: return mptcp_setsockopt_v4_set_tos(msk, optname, optval, optlen); } return -EOPNOTSUPP; } static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = (struct sock *)msk; struct sock *ssk; int ret; /* Limit to first subflow, before the connection establishment */ lock_sock(sk); ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) { ret = PTR_ERR(ssk); goto unlock; } ret = tcp_setsockopt(ssk, level, optname, optval, optlen); unlock: release_sock(sk); return ret; } static int mptcp_setsockopt_all_sf(struct mptcp_sock *msk, int level, int optname, sockptr_t optval, unsigned int optlen) { struct mptcp_subflow_context *subflow; int ret = 0; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 
ret = tcp_setsockopt(ssk, level, optname, optval, optlen); if (ret) break; } return ret; } static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = (void *)msk; int ret, val; switch (optname) { case TCP_ULP: return -EOPNOTSUPP; case TCP_CONGESTION: return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen); case TCP_DEFER_ACCEPT: /* See tcp.c: TCP_DEFER_ACCEPT does not fail */ mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen); return 0; case TCP_FASTOPEN: case TCP_FASTOPEN_CONNECT: case TCP_FASTOPEN_KEY: case TCP_FASTOPEN_NO_COOKIE: return mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen); } ret = mptcp_get_int_option(msk, optval, optlen, &val); if (ret) return ret; lock_sock(sk); switch (optname) { case TCP_INQ: if (val < 0 || val > 1) ret = -EINVAL; else msk->recvmsg_inq = !!val; break; case TCP_NOTSENT_LOWAT: WRITE_ONCE(msk->notsent_lowat, val); mptcp_write_space(sk); break; case TCP_CORK: ret = __mptcp_setsockopt_sol_tcp_cork(msk, val); break; case TCP_NODELAY: ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val); break; case TCP_KEEPIDLE: ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPIDLE, &tcp_sock_set_keepidle_locked, &msk->keepalive_idle, val); break; case TCP_KEEPINTVL: ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPINTVL, &tcp_sock_set_keepintvl, &msk->keepalive_intvl, val); break; case TCP_KEEPCNT: ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPCNT, &tcp_sock_set_keepcnt, &msk->keepalive_cnt, val); break; case TCP_MAXSEG: msk->maxseg = val; ret = mptcp_setsockopt_all_sf(msk, SOL_TCP, optname, optval, optlen); break; default: ret = -ENOPROTOOPT; } release_sock(sk); return ret; } int mptcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { struct mptcp_sock *msk = mptcp_sk(sk); struct sock *ssk; pr_debug("msk=%p\n", msk); if (level == SOL_SOCKET) return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen); if (!mptcp_supported_sockopt(level, optname)) return -ENOPROTOOPT; /* @@ the meaning of setsockopt() when the socket is connected and * there are multiple subflows is not yet defined. It is up to the * MPTCP-level socket to configure the subflows until the subflow * is in TCP fallback, when TCP socket options are passed through * to the one remaining subflow. 
*/ lock_sock(sk); ssk = __mptcp_tcp_fallback(msk); release_sock(sk); if (ssk) return tcp_setsockopt(ssk, level, optname, optval, optlen); if (level == SOL_IP) return mptcp_setsockopt_v4(msk, optname, optval, optlen); if (level == SOL_IPV6) return mptcp_setsockopt_v6(msk, optname, optval, optlen); if (level == SOL_TCP) return mptcp_setsockopt_sol_tcp(msk, optname, optval, optlen); return -EOPNOTSUPP; } static int mptcp_getsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = (struct sock *)msk; struct sock *ssk; int ret; lock_sock(sk); ssk = msk->first; if (ssk) goto get; ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) { ret = PTR_ERR(ssk); goto out; } get: ret = tcp_getsockopt(ssk, level, optname, optval, optlen); out: release_sock(sk); return ret; } void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info) { struct sock *sk = (struct sock *)msk; u32 flags = 0; bool slow; u32 now; memset(info, 0, sizeof(*info)); info->mptcpi_extra_subflows = READ_ONCE(msk->pm.extra_subflows); info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled); info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted); info->mptcpi_local_addr_used = READ_ONCE(msk->pm.local_addr_used); if (inet_sk_state_load(sk) == TCP_LISTEN) return; /* The following limits only make sense for the in-kernel PM */ if (mptcp_pm_is_kernel(msk)) { info->mptcpi_limit_extra_subflows = mptcp_pm_get_limit_extra_subflows(msk); info->mptcpi_endp_signal_max = mptcp_pm_get_endp_signal_max(msk); info->mptcpi_limit_add_addr_accepted = mptcp_pm_get_limit_add_addr_accepted(msk); info->mptcpi_endp_subflow_max = mptcp_pm_get_endp_subflow_max(msk); info->mptcpi_endp_laminar_max = mptcp_pm_get_endp_laminar_max(msk); info->mptcpi_endp_fullmesh_max = mptcp_pm_get_endp_fullmesh_max(msk); } if (__mptcp_check_fallback(msk)) flags |= MPTCP_INFO_FLAG_FALLBACK; if (READ_ONCE(msk->can_ack)) flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED; info->mptcpi_flags = flags; slow = lock_sock_fast(sk); info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled); info->mptcpi_token = msk->token; info->mptcpi_write_seq = msk->write_seq; info->mptcpi_retransmits = inet_csk(sk)->icsk_retransmits; info->mptcpi_bytes_sent = msk->bytes_sent; info->mptcpi_bytes_received = msk->bytes_received; info->mptcpi_bytes_retrans = msk->bytes_retrans; info->mptcpi_subflows_total = info->mptcpi_extra_subflows + __mptcp_has_initial_subflow(msk); now = tcp_jiffies32; info->mptcpi_last_data_sent = jiffies_to_msecs(now - msk->last_data_sent); info->mptcpi_last_data_recv = jiffies_to_msecs(now - msk->last_data_recv); unlock_sock_fast(sk, slow); mptcp_data_lock(sk); info->mptcpi_last_ack_recv = jiffies_to_msecs(now - msk->last_ack_recv); info->mptcpi_snd_una = msk->snd_una; info->mptcpi_rcv_nxt = msk->ack_seq; info->mptcpi_bytes_acked = msk->bytes_acked; mptcp_data_unlock(sk); } EXPORT_SYMBOL_GPL(mptcp_diag_fill_info); static int mptcp_getsockopt_info(struct mptcp_sock *msk, char __user *optval, int __user *optlen) { struct mptcp_info m_info; int len; if (get_user(len, optlen)) return -EFAULT; /* When used only to check if a fallback to TCP happened. 
*/ if (len == 0) return 0; len = min_t(unsigned int, len, sizeof(struct mptcp_info)); mptcp_diag_fill_info(msk, &m_info); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &m_info, len)) return -EFAULT; return 0; } static int mptcp_put_subflow_data(struct mptcp_subflow_data *sfd, char __user *optval, u32 copied, int __user *optlen) { u32 copylen = min_t(u32, sfd->size_subflow_data, sizeof(*sfd)); if (copied) copied += sfd->size_subflow_data; else copied = copylen; if (put_user(copied, optlen)) return -EFAULT; if (copy_to_user(optval, sfd, copylen)) return -EFAULT; return 0; } static int mptcp_get_subflow_data(struct mptcp_subflow_data *sfd, char __user *optval, int __user *optlen) { int len, copylen; if (get_user(len, optlen)) return -EFAULT; /* if mptcp_subflow_data size is changed, need to adjust * this function to deal with programs using old version. */ BUILD_BUG_ON(sizeof(*sfd) != MIN_INFO_OPTLEN_SIZE); if (len < MIN_INFO_OPTLEN_SIZE) return -EINVAL; memset(sfd, 0, sizeof(*sfd)); copylen = min_t(unsigned int, len, sizeof(*sfd)); if (copy_from_user(sfd, optval, copylen)) return -EFAULT; /* size_subflow_data is u32, but len is signed */ if (sfd->size_subflow_data > INT_MAX || sfd->size_user > INT_MAX) return -EINVAL; if (sfd->size_subflow_data < MIN_INFO_OPTLEN_SIZE || sfd->size_subflow_data > len) return -EINVAL; if (sfd->num_subflows || sfd->size_kernel) return -EINVAL; return len - sfd->size_subflow_data; } static int mptcp_getsockopt_tcpinfo(struct mptcp_sock *msk, char __user *optval, int __user *optlen) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; unsigned int sfcount = 0, copied = 0; struct mptcp_subflow_data sfd; char __user *infoptr; int len; len = mptcp_get_subflow_data(&sfd, optval, optlen); if (len < 0) return len; sfd.size_kernel = sizeof(struct tcp_info); sfd.size_user = min_t(unsigned int, sfd.size_user, sizeof(struct tcp_info)); infoptr = optval + sfd.size_subflow_data; lock_sock(sk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ++sfcount; if (len && len >= sfd.size_user) { struct tcp_info info; tcp_get_info(ssk, &info); if (copy_to_user(infoptr, &info, sfd.size_user)) { release_sock(sk); return -EFAULT; } infoptr += sfd.size_user; copied += sfd.size_user; len -= sfd.size_user; } } release_sock(sk); sfd.num_subflows = sfcount; if (mptcp_put_subflow_data(&sfd, optval, copied, optlen)) return -EFAULT; return 0; } static void mptcp_get_sub_addrs(const struct sock *sk, struct mptcp_subflow_addrs *a) { const struct inet_sock *inet = inet_sk(sk); memset(a, 0, sizeof(*a)); if (sk->sk_family == AF_INET) { a->sin_local.sin_family = AF_INET; a->sin_local.sin_port = inet->inet_sport; a->sin_local.sin_addr.s_addr = inet->inet_rcv_saddr; if (!a->sin_local.sin_addr.s_addr) a->sin_local.sin_addr.s_addr = inet->inet_saddr; a->sin_remote.sin_family = AF_INET; a->sin_remote.sin_port = inet->inet_dport; a->sin_remote.sin_addr.s_addr = inet->inet_daddr; #if IS_ENABLED(CONFIG_IPV6) } else if (sk->sk_family == AF_INET6) { const struct ipv6_pinfo *np = inet6_sk(sk); if (WARN_ON_ONCE(!np)) return; a->sin6_local.sin6_family = AF_INET6; a->sin6_local.sin6_port = inet->inet_sport; if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) a->sin6_local.sin6_addr = np->saddr; else a->sin6_local.sin6_addr = sk->sk_v6_rcv_saddr; a->sin6_remote.sin6_family = AF_INET6; a->sin6_remote.sin6_port = inet->inet_dport; a->sin6_remote.sin6_addr = sk->sk_v6_daddr; #endif } } static int mptcp_getsockopt_subflow_addrs(struct 
mptcp_sock *msk, char __user *optval, int __user *optlen) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; unsigned int sfcount = 0, copied = 0; struct mptcp_subflow_data sfd; char __user *addrptr; int len; len = mptcp_get_subflow_data(&sfd, optval, optlen); if (len < 0) return len; sfd.size_kernel = sizeof(struct mptcp_subflow_addrs); sfd.size_user = min_t(unsigned int, sfd.size_user, sizeof(struct mptcp_subflow_addrs)); addrptr = optval + sfd.size_subflow_data; lock_sock(sk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); ++sfcount; if (len && len >= sfd.size_user) { struct mptcp_subflow_addrs a; mptcp_get_sub_addrs(ssk, &a); if (copy_to_user(addrptr, &a, sfd.size_user)) { release_sock(sk); return -EFAULT; } addrptr += sfd.size_user; copied += sfd.size_user; len -= sfd.size_user; } } release_sock(sk); sfd.num_subflows = sfcount; if (mptcp_put_subflow_data(&sfd, optval, copied, optlen)) return -EFAULT; return 0; } static int mptcp_get_full_info(struct mptcp_full_info *mfi, char __user *optval, int __user *optlen) { int len; BUILD_BUG_ON(offsetof(struct mptcp_full_info, mptcp_info) != MIN_FULL_INFO_OPTLEN_SIZE); if (get_user(len, optlen)) return -EFAULT; if (len < MIN_FULL_INFO_OPTLEN_SIZE) return -EINVAL; memset(mfi, 0, sizeof(*mfi)); if (copy_from_user(mfi, optval, MIN_FULL_INFO_OPTLEN_SIZE)) return -EFAULT; if (mfi->size_tcpinfo_kernel || mfi->size_sfinfo_kernel || mfi->num_subflows) return -EINVAL; if (mfi->size_sfinfo_user > INT_MAX || mfi->size_tcpinfo_user > INT_MAX) return -EINVAL; return len - MIN_FULL_INFO_OPTLEN_SIZE; } static int mptcp_put_full_info(struct mptcp_full_info *mfi, char __user *optval, u32 copylen, int __user *optlen) { copylen += MIN_FULL_INFO_OPTLEN_SIZE; if (put_user(copylen, optlen)) return -EFAULT; if (copy_to_user(optval, mfi, copylen)) return -EFAULT; return 0; } static int mptcp_getsockopt_full_info(struct mptcp_sock *msk, char __user *optval, int __user *optlen) { unsigned int sfcount = 0, copylen = 0; struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; void __user *tcpinfoptr, *sfinfoptr; struct mptcp_full_info mfi; int len; len = mptcp_get_full_info(&mfi, optval, optlen); if (len < 0) return len; /* don't bother filling the mptcp info if there is not enough * user-space-provided storage */ if (len > 0) { mptcp_diag_fill_info(msk, &mfi.mptcp_info); copylen += min_t(unsigned int, len, sizeof(struct mptcp_info)); } mfi.size_tcpinfo_kernel = sizeof(struct tcp_info); mfi.size_tcpinfo_user = min_t(unsigned int, mfi.size_tcpinfo_user, sizeof(struct tcp_info)); sfinfoptr = u64_to_user_ptr(mfi.subflow_info); mfi.size_sfinfo_kernel = sizeof(struct mptcp_subflow_info); mfi.size_sfinfo_user = min_t(unsigned int, mfi.size_sfinfo_user, sizeof(struct mptcp_subflow_info)); tcpinfoptr = u64_to_user_ptr(mfi.tcp_info); lock_sock(sk); mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); struct mptcp_subflow_info sfinfo; struct tcp_info tcp_info; if (sfcount++ >= mfi.size_arrays_user) continue; /* fetch addr/tcp_info only if the user space buffers * are wide enough */ memset(&sfinfo, 0, sizeof(sfinfo)); sfinfo.id = subflow->subflow_id; if (mfi.size_sfinfo_user > offsetof(struct mptcp_subflow_info, addrs)) mptcp_get_sub_addrs(ssk, &sfinfo.addrs); if (copy_to_user(sfinfoptr, &sfinfo, mfi.size_sfinfo_user)) goto fail_release; if (mfi.size_tcpinfo_user) { tcp_get_info(ssk, &tcp_info); if (copy_to_user(tcpinfoptr, &tcp_info, 
mfi.size_tcpinfo_user)) goto fail_release; } tcpinfoptr += mfi.size_tcpinfo_user; sfinfoptr += mfi.size_sfinfo_user; } release_sock(sk); mfi.num_subflows = sfcount; if (mptcp_put_full_info(&mfi, optval, copylen, optlen)) return -EFAULT; return 0; fail_release: release_sock(sk); return -EFAULT; } static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval, int __user *optlen, int val) { int len; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { unsigned char ucval = (unsigned char)val; len = 1; if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &ucval, 1)) return -EFAULT; } else { len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; } return 0; } static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { struct sock *sk = (void *)msk; switch (optname) { case TCP_ULP: case TCP_CONGESTION: case TCP_INFO: case TCP_CC_INFO: case TCP_DEFER_ACCEPT: case TCP_FASTOPEN: case TCP_FASTOPEN_CONNECT: case TCP_FASTOPEN_KEY: case TCP_FASTOPEN_NO_COOKIE: return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen); case TCP_INQ: return mptcp_put_int_option(msk, optval, optlen, msk->recvmsg_inq); case TCP_CORK: return mptcp_put_int_option(msk, optval, optlen, msk->cork); case TCP_NODELAY: return mptcp_put_int_option(msk, optval, optlen, msk->nodelay); case TCP_KEEPIDLE: return mptcp_put_int_option(msk, optval, optlen, msk->keepalive_idle ? : READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_time) / HZ); case TCP_KEEPINTVL: return mptcp_put_int_option(msk, optval, optlen, msk->keepalive_intvl ? : READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_intvl) / HZ); case TCP_KEEPCNT: return mptcp_put_int_option(msk, optval, optlen, msk->keepalive_cnt ? 
: READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_probes)); case TCP_NOTSENT_LOWAT: return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat); case TCP_IS_MPTCP: return mptcp_put_int_option(msk, optval, optlen, 1); case TCP_MAXSEG: return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen); } return -EOPNOTSUPP; } static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { struct sock *sk = (void *)msk; switch (optname) { case IP_TOS: return mptcp_put_int_option(msk, optval, optlen, READ_ONCE(inet_sk(sk)->tos)); case IP_FREEBIND: return mptcp_put_int_option(msk, optval, optlen, inet_test_bit(FREEBIND, sk)); case IP_TRANSPARENT: return mptcp_put_int_option(msk, optval, optlen, inet_test_bit(TRANSPARENT, sk)); case IP_BIND_ADDRESS_NO_PORT: return mptcp_put_int_option(msk, optval, optlen, inet_test_bit(BIND_ADDRESS_NO_PORT, sk)); case IP_LOCAL_PORT_RANGE: return mptcp_put_int_option(msk, optval, optlen, READ_ONCE(inet_sk(sk)->local_port_range)); } return -EOPNOTSUPP; } static int mptcp_getsockopt_v6(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { struct sock *sk = (void *)msk; switch (optname) { case IPV6_V6ONLY: return mptcp_put_int_option(msk, optval, optlen, sk->sk_ipv6only); case IPV6_TRANSPARENT: return mptcp_put_int_option(msk, optval, optlen, inet_test_bit(TRANSPARENT, sk)); case IPV6_FREEBIND: return mptcp_put_int_option(msk, optval, optlen, inet_test_bit(FREEBIND, sk)); } return -EOPNOTSUPP; } static int mptcp_getsockopt_sol_mptcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { switch (optname) { case MPTCP_INFO: return mptcp_getsockopt_info(msk, optval, optlen); case MPTCP_FULL_INFO: return mptcp_getsockopt_full_info(msk, optval, optlen); case MPTCP_TCPINFO: return mptcp_getsockopt_tcpinfo(msk, optval, optlen); case MPTCP_SUBFLOW_ADDRS: return mptcp_getsockopt_subflow_addrs(msk, optval, optlen); } return -EOPNOTSUPP; } int mptcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *option) { struct mptcp_sock *msk = mptcp_sk(sk); struct sock *ssk; pr_debug("msk=%p\n", msk); /* @@ the meaning of setsockopt() when the socket is connected and * there are multiple subflows is not yet defined. It is up to the * MPTCP-level socket to configure the subflows until the subflow * is in TCP fallback, when socket options are passed through * to the one remaining subflow. 
*/ lock_sock(sk); ssk = __mptcp_tcp_fallback(msk); release_sock(sk); if (ssk) return tcp_getsockopt(ssk, level, optname, optval, option); if (level == SOL_IP) return mptcp_getsockopt_v4(msk, optname, optval, option); if (level == SOL_IPV6) return mptcp_getsockopt_v6(msk, optname, optval, option); if (level == SOL_TCP) return mptcp_getsockopt_sol_tcp(msk, optname, optval, option); if (level == SOL_MPTCP) return mptcp_getsockopt_sol_mptcp(msk, optname, optval, option); return -EOPNOTSUPP; } static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk) { static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK; struct sock *sk = (struct sock *)msk; bool keep_open; keep_open = sock_flag(sk, SOCK_KEEPOPEN); if (ssk->sk_prot->keepalive) ssk->sk_prot->keepalive(ssk, keep_open); sock_valbool_flag(ssk, SOCK_KEEPOPEN, keep_open); ssk->sk_priority = sk->sk_priority; ssk->sk_bound_dev_if = sk->sk_bound_dev_if; ssk->sk_incoming_cpu = sk->sk_incoming_cpu; ssk->sk_ipv6only = sk->sk_ipv6only; __ip_sock_set_tos(ssk, inet_sk(sk)->tos); if (sk->sk_userlocks & tx_rx_locks) { ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks; if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) { WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf); mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf; } if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf); } if (sock_flag(sk, SOCK_LINGER)) { ssk->sk_lingertime = sk->sk_lingertime; sock_set_flag(ssk, SOCK_LINGER); } else { sock_reset_flag(ssk, SOCK_LINGER); } if (sk->sk_mark != ssk->sk_mark) { ssk->sk_mark = sk->sk_mark; sk_dst_reset(ssk); } sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG)); if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops) tcp_set_congestion_control(ssk, msk->ca_name, false, true); __tcp_sock_set_cork(ssk, !!msk->cork); __tcp_sock_set_nodelay(ssk, !!msk->nodelay); tcp_sock_set_keepidle_locked(ssk, msk->keepalive_idle); tcp_sock_set_keepintvl(ssk, msk->keepalive_intvl); tcp_sock_set_keepcnt(ssk, msk->keepalive_cnt); tcp_sock_set_maxseg(ssk, msk->maxseg); inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk)); inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk)); inet_assign_bit(BIND_ADDRESS_NO_PORT, ssk, inet_test_bit(BIND_ADDRESS_NO_PORT, sk)); WRITE_ONCE(inet_sk(ssk)->local_port_range, READ_ONCE(inet_sk(sk)->local_port_range)); } void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); msk_owned_by_me(msk); ssk->sk_rcvlowat = 0; /* subflows must ignore any latency-related settings: will not affect * the user-space - only the msk is relevant - but will foul the * mptcp scheduler */ tcp_sk(ssk)->notsent_lowat = UINT_MAX; if (READ_ONCE(subflow->setsockopt_seq) != msk->setsockopt_seq) { sync_socket_options(msk, ssk); subflow->setsockopt_seq = msk->setsockopt_seq; } } /* unfortunately this is different enough from the tcp version so * that we can't factor it out */ int mptcp_set_rcvlowat(struct sock *sk, int val) { struct mptcp_subflow_context *subflow; int space, cap; /* bpf can land here with a wrong sk type */ if (sk->sk_protocol == IPPROTO_TCP) return -EINVAL; if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) cap = sk->sk_rcvbuf >> 1; else cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; val = min(val, cap); WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); /* Check if we need to signal EPOLLIN right now */ if (mptcp_epollin_ready(sk)) sk->sk_data_ready(sk); if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) return 0; space = mptcp_space_from_win(sk, val); if (space <= sk->sk_rcvbuf) return 0; /* propagate the rcvbuf changes to all the subflows */ WRITE_ONCE(sk->sk_rcvbuf, space); mptcp_for_each_subflow(mptcp_sk(sk), subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow; slow = lock_sock_fast(ssk); WRITE_ONCE(ssk->sk_rcvbuf, space); WRITE_ONCE(tcp_sk(ssk)->window_clamp, val); unlock_sock_fast(ssk, slow); } return 0; }
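The fragment below is a userspace sketch, not part of the kernel file above: it exercises the MPTCP_INFO branch of mptcp_getsockopt_sol_mptcp(), whose data is filled by mptcp_diag_fill_info(). It assumes <linux/mptcp.h> provides struct mptcp_info, MPTCP_INFO and MPTCP_INFO_FLAG_FALLBACK; SOL_MPTCP may need a manual define on older libc headers (it is 284 in the kernel uapi). Passing a zero-length buffer is also valid and only reports whether the socket fell back to plain TCP.

/* Illustrative userspace sketch; minimal error handling. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/mptcp.h>

#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif

static int print_mptcp_info(int fd)
{
	struct mptcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &info, &len) < 0)
		return -1;

	/* mptcpi_flags and mptcpi_token mirror what mptcp_diag_fill_info()
	 * copied out of the msk under its locks.
	 */
	printf("fallback to TCP: %s, token: %u\n",
	       (info.mptcpi_flags & MPTCP_INFO_FLAG_FALLBACK) ? "yes" : "no",
	       info.mptcpi_token);
	return 0;
}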
// SPDX-License-Identifier: GPL-2.0-only #include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/security.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sysctl.h> #include <linux/mman.h> #include <linux/hugetlb.h> #include <linux/vmalloc.h> #include
<linux/userfaultfd_k.h> #include <linux/elf.h> #include <linux/elf-randomize.h> #include <linux/personality.h> #include <linux/random.h> #include <linux/processor.h> #include <linux/sizes.h> #include <linux/compat.h> #include <linux/fsnotify.h> #include <linux/page_idle.h> #include <linux/uaccess.h> #include <kunit/visibility.h> #include "internal.h" #include "swap.h" /** * kfree_const - conditionally free memory * @x: pointer to the memory * * Function calls kfree only if @x is not in .rodata section. */ void kfree_const(const void *x) { if (!is_kernel_rodata((unsigned long)x)) kfree(x); } EXPORT_SYMBOL(kfree_const); /** * __kmemdup_nul - Create a NUL-terminated string from @s, which might be unterminated. * @s: The data to copy * @len: The size of the data, not including the NUL terminator * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s with NUL-termination or %NULL in * case of error */ static __always_inline char *__kmemdup_nul(const char *s, size_t len, gfp_t gfp) { char *buf; /* '+1' for the NUL terminator */ buf = kmalloc_track_caller(len + 1, gfp); if (!buf) return NULL; memcpy(buf, s, len); /* Ensure the buf is always NUL-terminated, regardless of @s. */ buf[len] = '\0'; return buf; } /** * kstrdup - allocate space for and copy an existing string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s or %NULL in case of error */ noinline char *kstrdup(const char *s, gfp_t gfp) { return s ? __kmemdup_nul(s, strlen(s), gfp) : NULL; } EXPORT_SYMBOL(kstrdup); /** * kstrdup_const - conditionally duplicate an existing const string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Note: Strings allocated by kstrdup_const should be freed by kfree_const and * must not be passed to krealloc(). * * Return: source string if it is in .rodata section otherwise * fallback to kstrdup. */ const char *kstrdup_const(const char *s, gfp_t gfp) { if (is_kernel_rodata((unsigned long)s)) return s; return kstrdup(s, gfp); } EXPORT_SYMBOL(kstrdup_const); /** * kstrndup - allocate space for and copy an existing string * @s: the string to duplicate * @max: read at most @max chars from @s * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Note: Use kmemdup_nul() instead if the size is known exactly. * * Return: newly allocated copy of @s or %NULL in case of error */ char *kstrndup(const char *s, size_t max, gfp_t gfp) { return s ? __kmemdup_nul(s, strnlen(s, max), gfp) : NULL; } EXPORT_SYMBOL(kstrndup); /** * kmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use * * Return: newly allocated copy of @src or %NULL in case of error, * result is physically contiguous. Use kfree() to free. */ void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) { void *p; p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kmemdup_noprof); /** * kmemdup_array - duplicate a given array. * * @src: array to duplicate. * @count: number of elements to duplicate from array. * @element_size: size of each element of array. * @gfp: GFP mask to use. * * Return: duplicated array of @src or %NULL in case of error, * result is physically contiguous. Use kfree() to free. 
*/ void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp) { return kmemdup(src, size_mul(element_size, count), gfp); } EXPORT_SYMBOL(kmemdup_array); /** * kvmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use * * Return: newly allocated copy of @src or %NULL in case of error, * result may be not physically contiguous. Use kvfree() to free. */ void *kvmemdup(const void *src, size_t len, gfp_t gfp) { void *p; p = kvmalloc(len, gfp); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kvmemdup); /** * kmemdup_nul - Create a NUL-terminated string from unterminated data * @s: The data to stringify * @len: The size of the data * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s with NUL-termination or %NULL in * case of error */ char *kmemdup_nul(const char *s, size_t len, gfp_t gfp) { return s ? __kmemdup_nul(s, len, gfp) : NULL; } EXPORT_SYMBOL(kmemdup_nul); static kmem_buckets *user_buckets __ro_after_init; static int __init init_user_buckets(void) { user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL); return 0; } subsys_initcall(init_user_buckets); /** * memdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. Result is physically * contiguous, to be freed by kfree(). */ void *memdup_user(const void __user *src, size_t len) { void *p; p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(memdup_user); /** * vmemdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. Result may be not * physically contiguous. Use kvfree() to free. */ void *vmemdup_user(const void __user *src, size_t len) { void *p; p = kmem_buckets_valloc(user_buckets, len, GFP_USER); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kvfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(vmemdup_user); /** * strndup_user - duplicate an existing string from user space * @s: The string to duplicate * @n: Maximum number of bytes to copy, including the trailing NUL. * * Return: newly allocated copy of @s or an ERR_PTR() in case of error */ char *strndup_user(const char __user *s, long n) { char *p; long length; length = strnlen_user(s, n); if (!length) return ERR_PTR(-EFAULT); if (length > n) return ERR_PTR(-EINVAL); p = memdup_user(s, length); if (IS_ERR(p)) return p; p[length - 1] = '\0'; return p; } EXPORT_SYMBOL(strndup_user); /** * memdup_user_nul - duplicate memory region from user space and NUL-terminate * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. 
*/ void *memdup_user_nul(const void __user *src, size_t len) { char *p; p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } p[len] = '\0'; return p; } EXPORT_SYMBOL(memdup_user_nul); /* Check if the vma is being used as a stack by this task */ int vma_is_stack_for_current(const struct vm_area_struct *vma) { struct task_struct * __maybe_unused t = current; return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); } /* * Change backing file, only valid to use during initial VMA setup. */ void vma_set_file(struct vm_area_struct *vma, struct file *file) { /* Changing an anonymous vma with this is illegal */ get_file(file); swap(vma->vm_file, file); fput(file); } EXPORT_SYMBOL(vma_set_file); #ifndef STACK_RND_MASK #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ #endif unsigned long randomize_stack_top(unsigned long stack_top) { unsigned long random_variable = 0; if (current->flags & PF_RANDOMIZE) { random_variable = get_random_long(); random_variable &= STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } #ifdef CONFIG_STACK_GROWSUP return PAGE_ALIGN(stack_top) + random_variable; #else return PAGE_ALIGN(stack_top) - random_variable; #endif } /** * randomize_page - Generate a random, page aligned address * @start: The smallest acceptable address the caller will take. * @range: The size of the area, starting at @start, within which the * random address must fall. * * If @start + @range would overflow, @range is capped. * * NOTE: Historical use of randomize_range, which this replaces, presumed that * @start was already page aligned. We now align it regardless. * * Return: A page aligned address within [start, start + range). On error, * @start is returned. */ unsigned long randomize_page(unsigned long start, unsigned long range) { if (!PAGE_ALIGNED(start)) { range -= PAGE_ALIGN(start) - start; start = PAGE_ALIGN(start); } if (start > ULONG_MAX - range) range = ULONG_MAX - start; range >>= PAGE_SHIFT; if (range == 0) return start; return start + (get_random_long() % range << PAGE_SHIFT); } #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT unsigned long __weak arch_randomize_brk(struct mm_struct *mm) { /* Is the current task 32bit ? */ if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task()) return randomize_page(mm->brk, SZ_32M); return randomize_page(mm->brk, SZ_1G); } unsigned long arch_mmap_rnd(void) { unsigned long rnd; #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS if (is_compat_task()) rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); else #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); return rnd << PAGE_SHIFT; } static int mmap_is_legacy(const struct rlimit *rlim_stack) { if (current->personality & ADDR_COMPAT_LAYOUT) return 1; /* On parisc the stack always grows up - so a unlimited stack should * not be an indicator to use the legacy memory layout. */ if (rlim_stack->rlim_cur == RLIM_INFINITY && !IS_ENABLED(CONFIG_STACK_GROWSUP)) return 1; return sysctl_legacy_va_layout; } /* * Leave enough space between the mmap area and the stack to honour ulimit in * the face of randomisation. */ #define MIN_GAP (SZ_128M) #define MAX_GAP (STACK_TOP / 6 * 5) static unsigned long mmap_base(const unsigned long rnd, const struct rlimit *rlim_stack) { #ifdef CONFIG_STACK_GROWSUP /* * For an upwards growing stack the calculation is much simpler. 
* Memory for the maximum stack size is reserved at the top of the * task. mmap_base starts directly below the stack and grows * downwards. */ return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd); #else unsigned long gap = rlim_stack->rlim_cur; unsigned long pad = stack_guard_gap; /* Account for stack randomization if necessary */ if (current->flags & PF_RANDOMIZE) pad += (STACK_RND_MASK << PAGE_SHIFT); /* Values close to RLIM_INFINITY can overflow. */ if (gap + pad > gap) gap += pad; if (gap < MIN_GAP && MIN_GAP < MAX_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; return PAGE_ALIGN(STACK_TOP - gap - rnd); #endif } void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack) { unsigned long random_factor = 0UL; if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); if (mmap_is_legacy(rlim_stack)) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm_flags_clear(MMF_TOPDOWN, mm); } else { mm->mmap_base = mmap_base(random_factor, rlim_stack); mm_flags_set(MMF_TOPDOWN, mm); } } #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack) { mm->mmap_base = TASK_UNMAPPED_BASE; mm_flags_clear(MMF_TOPDOWN, mm); } #endif #ifdef CONFIG_MMU EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout); #endif /** * __account_locked_vm - account locked pages to an mm's locked_vm * @mm: mm to account against * @pages: number of pages to account * @inc: %true if @pages should be considered positive, %false if not * @task: task used to check RLIMIT_MEMLOCK * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped * * Assumes @task and @mm are valid (i.e. at least one reference on each), and * that mmap_lock is held as writer. * * Return: * * 0 on success * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. */ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, const struct task_struct *task, bool bypass_rlim) { unsigned long locked_vm, limit; int ret = 0; mmap_assert_write_locked(mm); locked_vm = mm->locked_vm; if (inc) { if (!bypass_rlim) { limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked_vm + pages > limit) ret = -ENOMEM; } if (!ret) mm->locked_vm = locked_vm + pages; } else { WARN_ON_ONCE(pages > locked_vm); mm->locked_vm = locked_vm - pages; } pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK), ret ? " - exceeded" : ""); return ret; } EXPORT_SYMBOL_GPL(__account_locked_vm); /** * account_locked_vm - account locked pages to an mm's locked_vm * @mm: mm to account against, may be NULL * @pages: number of pages to account * @inc: %true if @pages should be considered positive, %false if not * * Assumes a non-NULL @mm is valid (i.e. at least one reference on it). * * Return: * * 0 on success, or if mm is NULL * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. 
*/ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) { int ret; if (pages == 0 || !mm) return 0; mmap_write_lock(mm); ret = __account_locked_vm(mm, pages, inc, current, capable(CAP_IPC_LOCK)); mmap_write_unlock(mm); return ret; } EXPORT_SYMBOL_GPL(account_locked_vm); unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff) { loff_t off = (loff_t)pgoff << PAGE_SHIFT; unsigned long ret; struct mm_struct *mm = current->mm; unsigned long populate; LIST_HEAD(uf); ret = security_mmap_file(file, prot, flag); if (!ret) ret = fsnotify_mmap_perm(file, prot, off, len); if (!ret) { if (mmap_write_lock_killable(mm)) return -EINTR; ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate, &uf); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(ret, populate); } return ret; } /* * Perform a userland memory mapping into the current process address space. See * the comment for do_mmap() for more details on this operation in general. * * This differs from do_mmap() in that: * * a. An offset parameter is provided rather than pgoff, which is both checked * for overflow and page alignment. * b. mmap locking is performed on the caller's behalf. * c. Userfaultfd unmap events and memory population are handled. * * This means that this function performs essentially the same work as if * userland were invoking mmap (2). * * Returns either an error, or the address at which the requested mapping has * been performed. */ unsigned long vm_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) { if (unlikely(offset + PAGE_ALIGN(len) < offset)) return -EINVAL; if (unlikely(offset_in_page(offset))) return -EINVAL; return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); } EXPORT_SYMBOL(vm_mmap); /** * __vmalloc_array - allocate memory for a virtually contiguous array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return __vmalloc_noprof(bytes, flags); } EXPORT_SYMBOL(__vmalloc_array_noprof); /** * vmalloc_array - allocate memory for a virtually contiguous array. * @n: number of elements. * @size: element size. */ void *vmalloc_array_noprof(size_t n, size_t size) { return __vmalloc_array_noprof(n, size, GFP_KERNEL); } EXPORT_SYMBOL(vmalloc_array_noprof); /** * __vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) { return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO); } EXPORT_SYMBOL(__vcalloc_noprof); /** * vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. */ void *vcalloc_noprof(size_t n, size_t size) { return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO); } EXPORT_SYMBOL(vcalloc_noprof); struct anon_vma *folio_anon_vma(const struct folio *folio) { unsigned long mapping = (unsigned long)folio->mapping; if ((mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON) return NULL; return (void *)(mapping - FOLIO_MAPPING_ANON); } /** * folio_mapping - Find the mapping where this folio is stored. * @folio: The folio. 
* * For folios which are in the page cache, return the mapping that this * page belongs to. Folios in the swap cache return the swap mapping * this page is stored in (which is different from the mapping for the * swap file or swap device where the data is stored). * * You can call this for folios which aren't in the swap cache or page * cache and it will return NULL. */ struct address_space *folio_mapping(const struct folio *folio) { struct address_space *mapping; /* This happens if someone calls flush_dcache_page on slab page */ if (unlikely(folio_test_slab(folio))) return NULL; if (unlikely(folio_test_swapcache(folio))) return swap_address_space(folio->swap); mapping = folio->mapping; if ((unsigned long)mapping & FOLIO_MAPPING_FLAGS) return NULL; return mapping; } EXPORT_SYMBOL(folio_mapping); /** * folio_copy - Copy the contents of one folio to another. * @dst: Folio to copy to. * @src: Folio to copy from. * * The bytes in the folio represented by @src are copied to @dst. * Assumes the caller has validated that @dst is at least as large as @src. * Can be called in atomic context for order-0 folios, but if the folio is * larger, it may sleep. */ void folio_copy(struct folio *dst, struct folio *src) { long i = 0; long nr = folio_nr_pages(src); for (;;) { copy_highpage(folio_page(dst, i), folio_page(src, i)); if (++i == nr) break; cond_resched(); } } EXPORT_SYMBOL(folio_copy); int folio_mc_copy(struct folio *dst, struct folio *src) { long nr = folio_nr_pages(src); long i = 0; for (;;) { if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i))) return -EHWPOISON; if (++i == nr) break; cond_resched(); } return 0; } EXPORT_SYMBOL(folio_mc_copy); int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; static int sysctl_overcommit_ratio __read_mostly = 50; static unsigned long sysctl_overcommit_kbytes __read_mostly; int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ #ifdef CONFIG_SYSCTL static int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret == 0 && write) sysctl_overcommit_kbytes = 0; return ret; } static void sync_overcommit_as(struct work_struct *dummy) { percpu_counter_sync(&vm_committed_as); } static int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int new_policy = -1; int ret; /* * The deviation of sync_overcommit_as could be big with loose policy * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply * with the strict "NEVER", and to avoid possible race condition (even * though user usually won't too frequently do the switching to policy * OVERCOMMIT_NEVER), the switch is done in the following order: * 1. changing the batch * 2. sync percpu count on each CPU * 3. 
switch the policy */ if (write) { t = *table; t.data = &new_policy; ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); if (ret || new_policy == -1) return ret; mm_compute_batch(new_policy); if (new_policy == OVERCOMMIT_NEVER) schedule_on_each_cpu(sync_overcommit_as); sysctl_overcommit_memory = new_policy; } else { ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); } return ret; } static int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) sysctl_overcommit_ratio = 0; return ret; } static const struct ctl_table util_sysctl_table[] = { { .procname = "overcommit_memory", .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), .mode = 0644, .proc_handler = overcommit_policy_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, .maxlen = sizeof(sysctl_overcommit_ratio), .mode = 0644, .proc_handler = overcommit_ratio_handler, }, { .procname = "overcommit_kbytes", .data = &sysctl_overcommit_kbytes, .maxlen = sizeof(sysctl_overcommit_kbytes), .mode = 0644, .proc_handler = overcommit_kbytes_handler, }, { .procname = "user_reserve_kbytes", .data = &sysctl_user_reserve_kbytes, .maxlen = sizeof(sysctl_user_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "admin_reserve_kbytes", .data = &sysctl_admin_reserve_kbytes, .maxlen = sizeof(sysctl_admin_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, }; static int __init init_vm_util_sysctls(void) { register_sysctl_init("vm", util_sysctl_table); return 0; } subsys_initcall(init_vm_util_sysctls); #endif /* CONFIG_SYSCTL */ /* * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used */ unsigned long vm_commit_limit(void) { unsigned long allowed; if (sysctl_overcommit_kbytes) allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); else allowed = ((totalram_pages() - hugetlb_total_pages()) * sysctl_overcommit_ratio / 100); allowed += total_swap_pages; return allowed; } /* * Make sure vm_committed_as in one cacheline and not cacheline shared with * other variables. It can be updated by several CPUs frequently. */ struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp; /* * The global memory commitment made in the system can be a metric * that can be used to drive ballooning decisions when Linux is hosted * as a guest. On Hyper-V, the host implements a policy engine for dynamically * balancing memory across competing virtual machines that are hosted. * Several metrics drive this policy engine including the guest reported * memory commitment. * * The time cost of this is very low for small platforms, and for big * platform like a 2S/36C/72T Skylake server, in worst case where * vm_committed_as's spinlock is under severe contention, the time cost * could be about 30~40 microseconds. */ unsigned long vm_memory_committed(void) { return percpu_counter_sum_positive(&vm_committed_as); } EXPORT_SYMBOL_GPL(vm_memory_committed); /* * Check that a process has enough memory to allocate a new virtual * mapping. 0 means there is enough memory for the allocation to * succeed and -ENOMEM implies there is not. * * We currently support three overcommit policies, which are set via the * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst * * Strict overcommit modes added 2002 Feb 26 by Alan Cox. 
* Additional code 2002 Jul 20 by Robert Love. * * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. * * Note this is a helper function intended to be used by LSMs which * wish to use this logic. */ int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin) { long allowed; unsigned long bytes_failed; vm_acct_memory(pages); /* * Sometimes we want to use more memory than we have */ if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) return 0; if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { if (pages > totalram_pages() + total_swap_pages) goto error; return 0; } allowed = vm_commit_limit(); /* * Reserve some for root */ if (!cap_sys_admin) allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); /* * Don't let a single process grow so big a user can't recover */ if (mm) { long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); allowed -= min_t(long, mm->total_vm / 32, reserve); } if (percpu_counter_read_positive(&vm_committed_as) < allowed) return 0; error: bytes_failed = pages << PAGE_SHIFT; pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n", __func__, current->pid, current->comm, bytes_failed); vm_unacct_memory(pages); return -ENOMEM; } /** * get_cmdline() - copy the cmdline value to a buffer. * @task: the task whose cmdline value to copy. * @buffer: the buffer to copy to. * @buflen: the length of the buffer. Larger cmdline values are truncated * to this length. * * Return: the size of the cmdline field copied. Note that the copy does * not guarantee an ending NULL byte. */ int get_cmdline(struct task_struct *task, char *buffer, int buflen) { int res = 0; unsigned int len; struct mm_struct *mm = get_task_mm(task); unsigned long arg_start, arg_end, env_start, env_end; if (!mm) goto out; if (!mm->arg_end) goto out_mm; /* Shh! No looking before we're done */ spin_lock(&mm->arg_lock); arg_start = mm->arg_start; arg_end = mm->arg_end; env_start = mm->env_start; env_end = mm->env_end; spin_unlock(&mm->arg_lock); len = arg_end - arg_start; if (len > buflen) len = buflen; res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE); /* * If the nul at the end of args has been overwritten, then * assume application is using setproctitle(3). */ if (res > 0 && buffer[res-1] != '\0' && len < buflen) { len = strnlen(buffer, res); if (len < res) { res = len; } else { len = env_end - env_start; if (len > buflen - res) len = buflen - res; res += access_process_vm(task, env_start, buffer+res, len, FOLL_FORCE); res = strnlen(buffer, res); } } out_mm: mmput(mm); out: return res; } int __weak memcmp_pages(struct page *page1, struct page *page2) { char *addr1, *addr2; int ret; addr1 = kmap_local_page(page1); addr2 = kmap_local_page(page2); ret = memcmp(addr1, addr2, PAGE_SIZE); kunmap_local(addr2); kunmap_local(addr1); return ret; } #ifdef CONFIG_PRINTK /** * mem_dump_obj - Print available provenance information * @object: object for which to find provenance information. * * This function uses pr_cont(), so that the caller is expected to have * printed out whatever preamble is appropriate. The provenance information * depends on the type of object and on how much debugging is enabled. * For example, for a slab-cache object, the slab name is printed, and, * if available, the return address and stack trace from the allocation * and last free path of that object. 
*/ void mem_dump_obj(void *object) { const char *type; if (kmem_dump_obj(object)) return; if (vmalloc_dump_obj(object)) return; if (is_vmalloc_addr(object)) type = "vmalloc memory"; else if (virt_addr_valid(object)) type = "non-slab/vmalloc memory"; else if (object == NULL) type = "NULL pointer"; else if (object == ZERO_SIZE_PTR) type = "zero-size pointer"; else type = "non-paged memory"; pr_cont(" %s\n", type); } EXPORT_SYMBOL_GPL(mem_dump_obj); #endif /* * A driver might set a page logically offline -- PageOffline() -- and * turn the page inaccessible in the hypervisor; after that, access to page * content can be fatal. * * Some special PFN walkers -- i.e., /proc/kcore -- read content of random * pages after checking PageOffline(); however, these PFN walkers can race * with drivers that set PageOffline(). * * page_offline_freeze()/page_offline_thaw() allows for a subsystem to * synchronize with such drivers, achieving that a page cannot be set * PageOffline() while frozen. * * page_offline_begin()/page_offline_end() is used by drivers that care about * such races when setting a page PageOffline(). */ static DECLARE_RWSEM(page_offline_rwsem); void page_offline_freeze(void) { down_read(&page_offline_rwsem); } void page_offline_thaw(void) { up_read(&page_offline_rwsem); } void page_offline_begin(void) { down_write(&page_offline_rwsem); } EXPORT_SYMBOL(page_offline_begin); void page_offline_end(void) { up_write(&page_offline_rwsem); } EXPORT_SYMBOL(page_offline_end); #ifndef flush_dcache_folio void flush_dcache_folio(struct folio *folio) { long i, nr = folio_nr_pages(folio); for (i = 0; i < nr; i++) flush_dcache_page(folio_page(folio, i)); } EXPORT_SYMBOL(flush_dcache_folio); #endif /** * __compat_vma_mmap() - See description for compat_vma_mmap() * for details. This is the same operation, only with a specific file operations * struct which may or may not be the same as vma->vm_file->f_op. * @f_op: The file operations whose .mmap_prepare() hook is specified. * @file: The file which backs or will back the mapping. * @vma: The VMA to apply the .mmap_prepare() hook to. * Returns: 0 on success or error. */ int __compat_vma_mmap(const struct file_operations *f_op, struct file *file, struct vm_area_struct *vma) { struct vm_area_desc desc = { .mm = vma->vm_mm, .file = file, .start = vma->vm_start, .end = vma->vm_end, .pgoff = vma->vm_pgoff, .vm_file = vma->vm_file, .vma_flags = vma->flags, .page_prot = vma->vm_page_prot, .action.type = MMAP_NOTHING, /* Default */ }; int err; err = f_op->mmap_prepare(&desc); if (err) return err; mmap_action_prepare(&desc.action, &desc); set_vma_from_desc(vma, &desc); return mmap_action_complete(&desc.action, vma); } EXPORT_SYMBOL(__compat_vma_mmap); /** * compat_vma_mmap() - Apply the file's .mmap_prepare() hook to an * existing VMA and execute any requested actions. * @file: The file which possesss an f_op->mmap_prepare() hook. * @vma: The VMA to apply the .mmap_prepare() hook to. * * Ordinarily, .mmap_prepare() is invoked directly upon mmap(). However, certain * stacked filesystems invoke a nested mmap hook of an underlying file. * * Until all filesystems are converted to use .mmap_prepare(), we must be * conservative and continue to invoke these stacked filesystems using the * deprecated .mmap() hook. * * However we have a problem if the underlying file system possesses an * .mmap_prepare() hook, as we are in a different context when we invoke the * .mmap() hook, already having a VMA to deal with. 
* * compat_vma_mmap() is a compatibility function that takes VMA state, * establishes a struct vm_area_desc descriptor, passes to the underlying * .mmap_prepare() hook and applies any changes performed by it. * * Once the conversion of filesystems is complete this function will no longer * be required and will be removed. * * Returns: 0 on success or error. */ int compat_vma_mmap(struct file *file, struct vm_area_struct *vma) { return __compat_vma_mmap(file->f_op, file, vma); } EXPORT_SYMBOL(compat_vma_mmap); static void set_ps_flags(struct page_snapshot *ps, const struct folio *folio, const struct page *page) { /* * Only the first page of a high-order buddy page has PageBuddy() set. * So we have to check manually whether this page is part of a high- * order buddy page. */ if (PageBuddy(page)) ps->flags |= PAGE_SNAPSHOT_PG_BUDDY; else if (page_count(page) == 0 && is_free_buddy_page(page)) ps->flags |= PAGE_SNAPSHOT_PG_BUDDY; if (folio_test_idle(folio)) ps->flags |= PAGE_SNAPSHOT_PG_IDLE; } /** * snapshot_page() - Create a snapshot of a struct page * @ps: Pointer to a struct page_snapshot to store the page snapshot * @page: The page to snapshot * * Create a snapshot of the page and store both its struct page and struct * folio representations in @ps. * * A snapshot is marked as "faithful" if the compound state of @page was * stable and allowed safe reconstruction of the folio representation. In * rare cases where this is not possible (e.g. due to folio splitting), * snapshot_page() falls back to treating @page as a single page and the * snapshot is marked as "unfaithful". The snapshot_page_is_faithful() * helper can be used to check for this condition. */ void snapshot_page(struct page_snapshot *ps, const struct page *page) { unsigned long head, nr_pages = 1; struct folio *foliop; int loops = 5; ps->pfn = page_to_pfn(page); ps->flags = PAGE_SNAPSHOT_FAITHFUL; again: memset(&ps->folio_snapshot, 0, sizeof(struct folio)); memcpy(&ps->page_snapshot, page, sizeof(*page)); head = ps->page_snapshot.compound_head; if ((head & 1) == 0) { ps->idx = 0; foliop = (struct folio *)&ps->page_snapshot; if (!folio_test_large(foliop)) { set_ps_flags(ps, page_folio(page), page); memcpy(&ps->folio_snapshot, foliop, sizeof(struct page)); return; } foliop = (struct folio *)page; } else { foliop = (struct folio *)(head - 1); ps->idx = folio_page_idx(foliop, page); } if (ps->idx < MAX_FOLIO_NR_PAGES) { memcpy(&ps->folio_snapshot, foliop, 2 * sizeof(struct page)); nr_pages = folio_nr_pages(&ps->folio_snapshot); if (nr_pages > 1) memcpy(&ps->folio_snapshot.__page_2, &foliop->__page_2, sizeof(struct page)); set_ps_flags(ps, foliop, page); } if (ps->idx > nr_pages) { if (loops-- > 0) goto again; clear_compound_head(&ps->page_snapshot); foliop = (struct folio *)&ps->page_snapshot; memcpy(&ps->folio_snapshot, foliop, sizeof(struct page)); ps->flags = 0; ps->idx = 0; } } static int mmap_action_finish(struct mmap_action *action, const struct vm_area_struct *vma, int err) { /* * If an error occurs, unmap the VMA altogether and return an error. We * only clear the newly allocated VMA, since this function is only * invoked if we do NOT merge, so we only clean up the VMA we created. */ if (err) { const size_t len = vma_pages(vma) << PAGE_SHIFT; do_munmap(current->mm, vma->vm_start, len, NULL); if (action->error_hook) { /* We may want to filter the error. */ err = action->error_hook(err); /* The caller should not clear the error. 
*/ VM_WARN_ON_ONCE(!err); } return err; } if (action->success_hook) return action->success_hook(vma); return 0; } #ifdef CONFIG_MMU /** * mmap_action_prepare - Perform preparatory setup for an VMA descriptor * action which need to be performed. * @desc: The VMA descriptor to prepare for @action. * @action: The action to perform. */ void mmap_action_prepare(struct mmap_action *action, struct vm_area_desc *desc) { switch (action->type) { case MMAP_NOTHING: break; case MMAP_REMAP_PFN: remap_pfn_range_prepare(desc, action->remap.start_pfn); break; case MMAP_IO_REMAP_PFN: io_remap_pfn_range_prepare(desc, action->remap.start_pfn, action->remap.size); break; } } EXPORT_SYMBOL(mmap_action_prepare); /** * mmap_action_complete - Execute VMA descriptor action. * @action: The action to perform. * @vma: The VMA to perform the action upon. * * Similar to mmap_action_prepare(). * * Return: 0 on success, or error, at which point the VMA will be unmapped. */ int mmap_action_complete(struct mmap_action *action, struct vm_area_struct *vma) { int err = 0; switch (action->type) { case MMAP_NOTHING: break; case MMAP_REMAP_PFN: err = remap_pfn_range_complete(vma, action->remap.start, action->remap.start_pfn, action->remap.size, action->remap.pgprot); break; case MMAP_IO_REMAP_PFN: err = io_remap_pfn_range_complete(vma, action->remap.start, action->remap.start_pfn, action->remap.size, action->remap.pgprot); break; } return mmap_action_finish(action, vma, err); } EXPORT_SYMBOL(mmap_action_complete); #else void mmap_action_prepare(struct mmap_action *action, struct vm_area_desc *desc) { switch (action->type) { case MMAP_NOTHING: break; case MMAP_REMAP_PFN: case MMAP_IO_REMAP_PFN: WARN_ON_ONCE(1); /* nommu cannot handle these. */ break; } } EXPORT_SYMBOL(mmap_action_prepare); int mmap_action_complete(struct mmap_action *action, struct vm_area_struct *vma) { int err = 0; switch (action->type) { case MMAP_NOTHING: break; case MMAP_REMAP_PFN: case MMAP_IO_REMAP_PFN: WARN_ON_ONCE(1); /* nommu cannot handle this. */ err = -EINVAL; break; } return mmap_action_finish(action, vma, err); } EXPORT_SYMBOL(mmap_action_complete); #endif #ifdef CONFIG_MMU /** * folio_pte_batch - detect a PTE batch for a large folio * @folio: The large folio to detect a PTE batch for. * @ptep: Page table pointer for the first entry. * @pte: Page table entry for the first page. * @max_nr: The maximum number of table entries to consider. * * This is a simplified variant of folio_pte_batch_flags(). * * Detect a PTE batch: consecutive (present) PTEs that map consecutive * pages of the same large folio in a single VMA and a single page table. * * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN, * the accessed bit, writable bit, dirt-bit and soft-dirty bit. * * ptep must map any page of the folio. max_nr must be at least one and * must be limited by the caller so scanning cannot exceed a single VMA and * a single page table. * * Return: the number of table entries in the batch. */ unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte, unsigned int max_nr) { return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr, 0); } #endif /* CONFIG_MMU */ #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) /** * page_range_contiguous - test whether the page range is contiguous * @page: the start of the page range. * @nr_pages: the number of pages in the range. * * Test whether the page range is contiguous, such that they can be iterated * naively, corresponding to iterating a contiguous PFN range. 
* * This function should primarily only be used for debug checks, or when * working with page ranges that are not naturally contiguous (e.g., pages * within a folio are). * * Returns true if contiguous, otherwise false. */ bool page_range_contiguous(const struct page *page, unsigned long nr_pages) { const unsigned long start_pfn = page_to_pfn(page); const unsigned long end_pfn = start_pfn + nr_pages; unsigned long pfn; /* * The memmap is allocated per memory section, so no need to check * within the first section. However, we need to check each other * spanned memory section once, making sure the first page in a * section could similarly be reached by just iterating pages. */ for (pfn = ALIGN(start_pfn, PAGES_PER_SECTION); pfn < end_pfn; pfn += PAGES_PER_SECTION) if (unlikely(page + (pfn - start_pfn) != pfn_to_page(pfn))) return false; return true; } EXPORT_SYMBOL(page_range_contiguous); #endif
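The duplication helpers above (memdup_user_nul(), kstrdup(), kstrdup_const() and friends) are meant to be consumed by driver and filesystem code rather than by mm/ itself. Below is a minimal sketch of a typical caller under hypothetical assumptions: the function names and the example_label variable are invented for illustration, only the mm/util.c helpers themselves are real kernel APIs, and locking around the stored pointer is omitted for brevity.

/* Sketch of a hypothetical caller of the mm/util.c duplication helpers. */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/uaccess.h>

static char *example_label;		/* hypothetical module state */

static int example_set_label(const char __user *ubuf, size_t len)
{
	char *label;

	/* Bounded copy from user space; the result is always NUL-terminated. */
	label = memdup_user_nul(ubuf, len);
	if (IS_ERR(label))
		return PTR_ERR(label);

	kfree(example_label);		/* drop any previous value */
	example_label = label;
	return 0;
}

static const char *example_dup_name(const char *name)
{
	/*
	 * kstrdup_const() skips the allocation when @name lives in .rodata;
	 * the result must be released with kfree_const(), never krealloc().
	 */
	return kstrdup_const(name, GFP_KERNEL);
}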
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 Politecnico di Torino, Italy * TORSEC group -- https://security.polito.it * * Author: Roberto Sassu <roberto.sassu@polito.it> * * File: ima_template.c * Helpers to manage template descriptors.
*/ #include <linux/rculist.h> #include "ima.h" #include "ima_template_lib.h" enum header_fields { HDR_PCR, HDR_DIGEST, HDR_TEMPLATE_NAME, HDR_TEMPLATE_DATA, HDR__LAST }; static struct ima_template_desc builtin_templates[] = { {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT}, {.name = "ima-ng", .fmt = "d-ng|n-ng"}, {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"}, {.name = "ima-ngv2", .fmt = "d-ngv2|n-ng"}, {.name = "ima-sigv2", .fmt = "d-ngv2|n-ng|sig"}, {.name = "ima-buf", .fmt = "d-ng|n-ng|buf"}, {.name = "ima-modsig", .fmt = "d-ng|n-ng|sig|d-modsig|modsig"}, {.name = "evm-sig", .fmt = "d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode"}, {.name = "", .fmt = ""}, /* placeholder for a custom format */ }; static LIST_HEAD(defined_templates); static DEFINE_SPINLOCK(template_list); static int template_setup_done; static const struct ima_template_field supported_fields[] = { {.field_id = "d", .field_init = ima_eventdigest_init, .field_show = ima_show_template_digest}, {.field_id = "n", .field_init = ima_eventname_init, .field_show = ima_show_template_string}, {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init, .field_show = ima_show_template_digest_ng}, {.field_id = "d-ngv2", .field_init = ima_eventdigest_ngv2_init, .field_show = ima_show_template_digest_ngv2}, {.field_id = "n-ng", .field_init = ima_eventname_ng_init, .field_show = ima_show_template_string}, {.field_id = "sig", .field_init = ima_eventsig_init, .field_show = ima_show_template_sig}, {.field_id = "buf", .field_init = ima_eventbuf_init, .field_show = ima_show_template_buf}, {.field_id = "d-modsig", .field_init = ima_eventdigest_modsig_init, .field_show = ima_show_template_digest_ng}, {.field_id = "modsig", .field_init = ima_eventmodsig_init, .field_show = ima_show_template_sig}, {.field_id = "evmsig", .field_init = ima_eventevmsig_init, .field_show = ima_show_template_sig}, {.field_id = "iuid", .field_init = ima_eventinodeuid_init, .field_show = ima_show_template_uint}, {.field_id = "igid", .field_init = ima_eventinodegid_init, .field_show = ima_show_template_uint}, {.field_id = "imode", .field_init = ima_eventinodemode_init, .field_show = ima_show_template_uint}, {.field_id = "xattrnames", .field_init = ima_eventinodexattrnames_init, .field_show = ima_show_template_string}, {.field_id = "xattrlengths", .field_init = ima_eventinodexattrlengths_init, .field_show = ima_show_template_sig}, {.field_id = "xattrvalues", .field_init = ima_eventinodexattrvalues_init, .field_show = ima_show_template_sig}, }; /* * Used when restoring measurements carried over from a kexec. 'd' and 'n' don't * need to be accounted for since they shouldn't be defined in the same template * description as 'd-ng' and 'n-ng' respectively. */ #define MAX_TEMPLATE_NAME_LEN \ sizeof("d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode") static struct ima_template_desc *ima_template; static struct ima_template_desc *ima_buf_template; /** * ima_template_has_modsig - Check whether template has modsig-related fields. * @ima_template: IMA template to check. * * Tells whether the given template has fields referencing a file's appended * signature. 
*/ bool ima_template_has_modsig(const struct ima_template_desc *ima_template) { int i; for (i = 0; i < ima_template->num_fields; i++) if (!strcmp(ima_template->fields[i]->field_id, "modsig") || !strcmp(ima_template->fields[i]->field_id, "d-modsig")) return true; return false; } static int __init ima_template_setup(char *str) { struct ima_template_desc *template_desc; int template_len = strlen(str); if (template_setup_done) return 1; if (!ima_template) ima_init_template_list(); /* * Verify that a template with the supplied name exists. * If not, use CONFIG_IMA_DEFAULT_TEMPLATE. */ template_desc = lookup_template_desc(str); if (!template_desc) { pr_err("template %s not found, using %s\n", str, CONFIG_IMA_DEFAULT_TEMPLATE); return 1; } /* * Verify whether the current hash algorithm is supported * by the 'ima' template. */ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 && ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) { pr_err("template does not support hash alg\n"); return 1; } ima_template = template_desc; template_setup_done = 1; return 1; } __setup("ima_template=", ima_template_setup); static int __init ima_template_fmt_setup(char *str) { int num_templates = ARRAY_SIZE(builtin_templates); if (template_setup_done) return 1; if (template_desc_init_fields(str, NULL, NULL) < 0) { pr_err("format string '%s' not valid, using template %s\n", str, CONFIG_IMA_DEFAULT_TEMPLATE); return 1; } builtin_templates[num_templates - 1].fmt = str; ima_template = builtin_templates + num_templates - 1; template_setup_done = 1; return 1; } __setup("ima_template_fmt=", ima_template_fmt_setup); struct ima_template_desc *lookup_template_desc(const char *name) { struct ima_template_desc *template_desc; int found = 0; rcu_read_lock(); list_for_each_entry_rcu(template_desc, &defined_templates, list) { if ((strcmp(template_desc->name, name) == 0) || (strcmp(template_desc->fmt, name) == 0)) { found = 1; break; } } rcu_read_unlock(); return found ? template_desc : NULL; } static const struct ima_template_field * lookup_template_field(const char *field_id) { int i; for (i = 0; i < ARRAY_SIZE(supported_fields); i++) if (strncmp(supported_fields[i].field_id, field_id, IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0) return &supported_fields[i]; return NULL; } static int template_fmt_size(const char *template_fmt) { char c; int template_fmt_len = strlen(template_fmt); int i = 0, j = 0; while (i < template_fmt_len) { c = template_fmt[i]; if (c == '|') j++; i++; } return j + 1; } int template_desc_init_fields(const char *template_fmt, const struct ima_template_field ***fields, int *num_fields) { const char *template_fmt_ptr; const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX]; int template_num_fields; int i, len; if (num_fields && *num_fields > 0) /* already initialized? 
*/ return 0; template_num_fields = template_fmt_size(template_fmt); if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) { pr_err("format string '%s' contains too many fields\n", template_fmt); return -EINVAL; } for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields; i++, template_fmt_ptr += len + 1) { char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1]; len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr; if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) { pr_err("Invalid field with length %d\n", len); return -EINVAL; } memcpy(tmp_field_id, template_fmt_ptr, len); tmp_field_id[len] = '\0'; found_fields[i] = lookup_template_field(tmp_field_id); if (!found_fields[i]) { pr_err("field '%s' not found\n", tmp_field_id); return -ENOENT; } } if (fields && num_fields) { *fields = kmalloc_objs(**fields, i); if (*fields == NULL) return -ENOMEM; memcpy(*fields, found_fields, i * sizeof(**fields)); *num_fields = i; } return 0; } void ima_init_template_list(void) { int i; if (!list_empty(&defined_templates)) return; spin_lock(&template_list); for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) { list_add_tail_rcu(&builtin_templates[i].list, &defined_templates); } spin_unlock(&template_list); } struct ima_template_desc *ima_template_desc_current(void) { if (!ima_template) { ima_init_template_list(); ima_template = lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE); } return ima_template; } struct ima_template_desc *ima_template_desc_buf(void) { if (!ima_buf_template) { ima_init_template_list(); ima_buf_template = lookup_template_desc("ima-buf"); } return ima_buf_template; } int __init ima_init_template(void) { struct ima_template_desc *template = ima_template_desc_current(); int result; result = template_desc_init_fields(template->fmt, &(template->fields), &(template->num_fields)); if (result < 0) { pr_err("template %s init failed, result: %d\n", (strlen(template->name) ? template->name : template->fmt), result); return result; } template = ima_template_desc_buf(); if (!template) { pr_err("Failed to get ima-buf template\n"); return -EINVAL; } result = template_desc_init_fields(template->fmt, &(template->fields), &(template->num_fields)); if (result < 0) pr_err("template %s init failed, result: %d\n", (strlen(template->name) ? 
template->name : template->fmt), result); return result; } static struct ima_template_desc *restore_template_fmt(char *template_name) { struct ima_template_desc *template_desc = NULL; int ret; ret = template_desc_init_fields(template_name, NULL, NULL); if (ret < 0) { pr_err("attempting to initialize the template \"%s\" failed\n", template_name); goto out; } template_desc = kzalloc_obj(*template_desc); if (!template_desc) goto out; template_desc->name = ""; template_desc->fmt = kstrdup(template_name, GFP_KERNEL); if (!template_desc->fmt) { kfree(template_desc); template_desc = NULL; goto out; } spin_lock(&template_list); list_add_tail_rcu(&template_desc->list, &defined_templates); spin_unlock(&template_list); out: return template_desc; } static int ima_restore_template_data(struct ima_template_desc *template_desc, void *template_data, int template_data_size, struct ima_template_entry **entry) { struct tpm_digest *digests; int ret = 0; int i; *entry = kzalloc_flex(**entry, template_data, template_desc->num_fields, GFP_NOFS); if (!*entry) return -ENOMEM; digests = kzalloc_objs(*digests, NR_BANKS(ima_tpm_chip) + ima_extra_slots, GFP_NOFS); if (!digests) { kfree(*entry); return -ENOMEM; } (*entry)->digests = digests; ret = ima_parse_buf(template_data, template_data + template_data_size, NULL, template_desc->num_fields, (*entry)->template_data, NULL, NULL, ENFORCE_FIELDS | ENFORCE_BUFEND, "template data"); if (ret < 0) { kfree((*entry)->digests); kfree(*entry); return ret; } (*entry)->template_desc = template_desc; for (i = 0; i < template_desc->num_fields; i++) { struct ima_field_data *field_data = &(*entry)->template_data[i]; u8 *data = field_data->data; (*entry)->template_data[i].data = kzalloc(field_data->len + 1, GFP_KERNEL); if (!(*entry)->template_data[i].data) { ret = -ENOMEM; break; } memcpy((*entry)->template_data[i].data, data, field_data->len); (*entry)->template_data_len += sizeof(field_data->len); (*entry)->template_data_len += field_data->len; } if (ret < 0) { ima_free_template_entry(*entry); *entry = NULL; } return ret; } /* Restore the serialized binary measurement list without extending PCRs. */ int ima_restore_measurement_list(loff_t size, void *buf) { char template_name[MAX_TEMPLATE_NAME_LEN]; unsigned char zero[TPM_DIGEST_SIZE] = { 0 }; struct ima_kexec_hdr *khdr = buf; struct ima_field_data hdr[HDR__LAST] = { [HDR_PCR] = {.len = sizeof(u32)}, [HDR_DIGEST] = {.len = TPM_DIGEST_SIZE}, }; void *bufp = buf + sizeof(*khdr); void *bufendp; struct ima_template_entry *entry; struct ima_template_desc *template_desc; DECLARE_BITMAP(hdr_mask, HDR__LAST); unsigned long count = 0; int ret = 0; if (!buf || size < sizeof(*khdr)) return 0; if (ima_canonical_fmt) { khdr->version = le16_to_cpu((__force __le16)khdr->version); khdr->count = le64_to_cpu((__force __le64)khdr->count); khdr->buffer_size = le64_to_cpu((__force __le64)khdr->buffer_size); } if (khdr->version != 1) { pr_err("attempting to restore a incompatible measurement list"); return -EINVAL; } if (khdr->count > ULONG_MAX - 1) { pr_err("attempting to restore too many measurements"); return -EINVAL; } bitmap_zero(hdr_mask, HDR__LAST); bitmap_set(hdr_mask, HDR_PCR, 1); bitmap_set(hdr_mask, HDR_DIGEST, 1); /* * ima kexec buffer prefix: version, buffer size, count * v1 format: pcr, digest, template-name-len, template-name, * template-data-size, template-data */ bufendp = buf + khdr->buffer_size; while ((bufp < bufendp) && (count++ < khdr->count)) { int enforce_mask = ENFORCE_FIELDS; enforce_mask |= (count == khdr->count) ? 
ENFORCE_BUFEND : 0; ret = ima_parse_buf(bufp, bufendp, &bufp, HDR__LAST, hdr, NULL, hdr_mask, enforce_mask, "entry header"); if (ret < 0) break; if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) { pr_err("attempting to restore a template name that is too long\n"); ret = -EINVAL; break; } /* template name is not null terminated */ memcpy(template_name, hdr[HDR_TEMPLATE_NAME].data, hdr[HDR_TEMPLATE_NAME].len); template_name[hdr[HDR_TEMPLATE_NAME].len] = 0; if (strcmp(template_name, "ima") == 0) { pr_err("attempting to restore an unsupported template \"%s\" failed\n", template_name); ret = -EINVAL; break; } template_desc = lookup_template_desc(template_name); if (!template_desc) { template_desc = restore_template_fmt(template_name); if (!template_desc) break; } /* * Only the running system's template format is initialized * on boot. As needed, initialize the other template formats. */ ret = template_desc_init_fields(template_desc->fmt, &(template_desc->fields), &(template_desc->num_fields)); if (ret < 0) { pr_err("attempting to restore the template fmt \"%s\" failed\n", template_desc->fmt); ret = -EINVAL; break; } ret = ima_restore_template_data(template_desc, hdr[HDR_TEMPLATE_DATA].data, hdr[HDR_TEMPLATE_DATA].len, &entry); if (ret < 0) break; if (memcmp(hdr[HDR_DIGEST].data, zero, sizeof(zero))) { ret = ima_calc_field_array_hash( &entry->template_data[0], entry); if (ret < 0) { pr_err("cannot calculate template digest\n"); ret = -EINVAL; break; } } entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) : le32_to_cpu(*(__le32 *)(hdr[HDR_PCR].data)); ret = ima_restore_measurement_entry(entry); if (ret < 0) break; } return ret; }
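Every template above is ultimately described by a '|'-separated format string such as "d-ng|n-ng|sig", and template_fmt_size() derives the field count simply by counting separators. The standalone sketch below (userspace C, hypothetical helper name) reproduces that counting rule for illustration; it is not kernel code.

/* Standalone illustration of the '|'-separated template format rule. */
#include <stdio.h>

static int count_template_fields(const char *fmt)
{
	int nfields = 1;	/* one field more than the number of '|' */

	for (; *fmt; fmt++)
		if (*fmt == '|')
			nfields++;
	return nfields;
}

int main(void)
{
	/* The built-in "ima-sig" format uses three fields. */
	printf("%d\n", count_template_fields("d-ng|n-ng|sig"));	/* prints 3 */
	return 0;
}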
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2011 Novell Inc. */ #include <uapi/linux/magic.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/mount.h> #include <linux/parser.h> #include <linux/module.h> #include <linux/statfs.h> #include <linux/seq_file.h> #include <linux/posix_acl_xattr.h> #include <linux/exportfs.h> #include <linux/file.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include "overlayfs.h" #include "params.h" MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Overlay filesystem"); MODULE_LICENSE("GPL"); struct ovl_dir_cache; static struct dentry *ovl_d_real(struct dentry *dentry, enum d_real_type type) { struct dentry *upper, *lower; int err; switch (type) { case D_REAL_DATA: case D_REAL_METADATA: break; default: goto bug; } if (!d_is_reg(dentry)) { /* d_real_inode() is only relevant for regular files */ return dentry; } upper = ovl_dentry_upper(dentry); if (upper && (type == D_REAL_METADATA || ovl_has_upperdata(d_inode(dentry)))) return upper; if (type == D_REAL_METADATA) { lower = ovl_dentry_lower(dentry); goto real_lower; } /* * Best effort lazy lookup of lowerdata for D_REAL_DATA case to return * the real lowerdata dentry. The only current caller of d_real() with * D_REAL_DATA is d_real_inode() from trace_uprobe and this caller is * likely going to be followed reading from the file, before placing * uprobes on offset within the file, so lowerdata should be available * when setting the uprobe.
*/ err = ovl_verify_lowerdata(dentry); if (err) goto bug; lower = ovl_dentry_lowerdata(dentry); if (!lower) goto bug; real_lower: /* Handle recursion into stacked lower fs */ return d_real(lower, type); bug: WARN(1, "%s(%pd4, %d): real dentry not found\n", __func__, dentry, type); return dentry; } static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak) { int ret = 1; if (!d) return 1; if (weak) { if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE) ret = d->d_op->d_weak_revalidate(d, flags); } else if (d->d_flags & DCACHE_OP_REVALIDATE) { struct dentry *parent; struct inode *dir; struct name_snapshot n; if (flags & LOOKUP_RCU) { parent = READ_ONCE(d->d_parent); dir = d_inode_rcu(parent); if (!dir) return -ECHILD; } else { parent = dget_parent(d); dir = d_inode(parent); } take_dentry_name_snapshot(&n, d); ret = d->d_op->d_revalidate(dir, &n.name, d, flags); release_dentry_name_snapshot(&n); if (!(flags & LOOKUP_RCU)) dput(parent); if (!ret) { if (!(flags & LOOKUP_RCU)) d_invalidate(d); ret = -ESTALE; } } return ret; } static int ovl_dentry_revalidate_common(struct dentry *dentry, unsigned int flags, bool weak) { struct ovl_entry *oe; struct ovl_path *lowerstack; struct inode *inode = d_inode_rcu(dentry); struct dentry *upper; unsigned int i; int ret = 1; if (!inode) { /* * Lookup of negative dentries will call ovl_dentry_init_flags() * with NULL upperdentry and NULL oe, resulting in the * DCACHE_OP*_REVALIDATE flags being cleared. Hence the only * way to get a negative inode is due to a race with dentry * destruction. */ WARN_ON(!(flags & LOOKUP_RCU)); return -ECHILD; } oe = OVL_I_E(inode); lowerstack = ovl_lowerstack(oe); upper = ovl_i_dentry_upper(inode); if (upper) ret = ovl_revalidate_real(upper, flags, weak); for (i = 0; ret > 0 && i < ovl_numlower(oe); i++) ret = ovl_revalidate_real(lowerstack[i].dentry, flags, weak); return ret; } static int ovl_dentry_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { return ovl_dentry_revalidate_common(dentry, flags, false); } static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags) { return ovl_dentry_revalidate_common(dentry, flags, true); } static const struct dentry_operations ovl_dentry_operations = { .d_real = ovl_d_real, .d_revalidate = ovl_dentry_revalidate, .d_weak_revalidate = ovl_dentry_weak_revalidate, }; #if IS_ENABLED(CONFIG_UNICODE) static const struct dentry_operations ovl_dentry_ci_operations = { .d_real = ovl_d_real, .d_revalidate = ovl_dentry_revalidate, .d_weak_revalidate = ovl_dentry_weak_revalidate, .d_hash = generic_ci_d_hash, .d_compare = generic_ci_d_compare, }; #endif static struct kmem_cache *ovl_inode_cachep; static struct inode *ovl_alloc_inode(struct super_block *sb) { struct ovl_inode *oi = alloc_inode_sb(sb, ovl_inode_cachep, GFP_KERNEL); if (!oi) return NULL; oi->cache = NULL; oi->redirect = NULL; oi->version = 0; oi->flags = 0; oi->__upperdentry = NULL; oi->lowerdata_redirect = NULL; oi->oe = NULL; mutex_init(&oi->lock); return &oi->vfs_inode; } static void ovl_free_inode(struct inode *inode) { struct ovl_inode *oi = OVL_I(inode); kfree(oi->redirect); kfree(oi->oe); mutex_destroy(&oi->lock); kmem_cache_free(ovl_inode_cachep, oi); } static void ovl_destroy_inode(struct inode *inode) { struct ovl_inode *oi = OVL_I(inode); dput(oi->__upperdentry); ovl_stack_put(ovl_lowerstack(oi->oe), ovl_numlower(oi->oe)); if (S_ISDIR(inode->i_mode)) ovl_dir_cache_free(inode); else kfree(oi->lowerdata_redirect); } static void ovl_put_super(struct 
super_block *sb) { struct ovl_fs *ofs = OVL_FS(sb); if (ofs) ovl_free_fs(ofs); } /* Sync real dirty inodes in upper filesystem (if it exists) */ static int ovl_sync_fs(struct super_block *sb, int wait) { struct ovl_fs *ofs = OVL_FS(sb); struct super_block *upper_sb; int ret; ret = ovl_sync_status(ofs); if (ret < 0) return -EIO; if (!ret) return ret; /* * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC). * All the super blocks will be iterated, including upper_sb. * * If this is a syncfs(2) call, then we do need to call * sync_filesystem() on upper_sb, but enough if we do it when being * called with wait == 1. */ if (!wait) return 0; upper_sb = ovl_upper_mnt(ofs)->mnt_sb; down_read(&upper_sb->s_umount); ret = sync_filesystem(upper_sb); up_read(&upper_sb->s_umount); return ret; } /** * ovl_statfs * @dentry: The dentry to query * @buf: The struct kstatfs to fill in with stats * * Get the filesystem statistics. As writes always target the upper layer * filesystem pass the statfs to the upper filesystem (if it exists) */ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct ovl_fs *ofs = OVL_FS(sb); struct dentry *root_dentry = sb->s_root; struct path path; int err; ovl_path_real(root_dentry, &path); err = vfs_statfs(&path, buf); if (!err) { buf->f_namelen = ofs->namelen; buf->f_type = OVERLAYFS_SUPER_MAGIC; if (ovl_has_fsid(ofs)) buf->f_fsid = uuid_to_fsid(sb->s_uuid.b); } return err; } static const struct super_operations ovl_super_operations = { .alloc_inode = ovl_alloc_inode, .free_inode = ovl_free_inode, .destroy_inode = ovl_destroy_inode, .drop_inode = inode_just_drop, .put_super = ovl_put_super, .sync_fs = ovl_sync_fs, .statfs = ovl_statfs, .show_options = ovl_show_options, }; #define OVL_WORKDIR_NAME "work" #define OVL_INDEXDIR_NAME "index" static struct dentry *ovl_workdir_create(struct ovl_fs *ofs, const char *name, bool persist) { struct inode *dir = ofs->workbasedir->d_inode; struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *work; int err; bool retried = false; retry: work = ovl_start_creating_upper(ofs, ofs->workbasedir, &QSTR(name)); if (!IS_ERR(work)) { struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = S_IFDIR | 0, }; if (work->d_inode) { end_creating_keep(work); if (persist) return work; err = -EEXIST; if (retried) goto out_dput; retried = true; err = ovl_workdir_cleanup(ofs, ofs->workbasedir, mnt, work, 0); dput(work); if (err == -EINVAL) return ERR_PTR(err); goto retry; } work = ovl_do_mkdir(ofs, dir, work, attr.ia_mode); end_creating_keep(work); err = PTR_ERR(work); if (IS_ERR(work)) goto out_err; /* Weird filesystem returning with hashed negative (kernfs)? */ err = -EINVAL; if (d_really_is_negative(work)) goto out_dput; /* * Try to remove POSIX ACL xattrs from workdir. We are good if: * * a) success (there was a POSIX ACL xattr and was removed) * b) -ENODATA (there was no POSIX ACL xattr) * c) -EOPNOTSUPP (POSIX ACL xattrs are not supported) * * There are various other error values that could effectively * mean that the xattr doesn't exist (e.g. -ERANGE is returned * if the xattr name is too long), but the set of filesystems * allowed as upper are limited to "normal" ones, where checking * for the above two errors is sufficient. 
*/ err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_DEFAULT); if (err && err != -ENODATA && err != -EOPNOTSUPP) goto out_dput; err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_ACCESS); if (err && err != -ENODATA && err != -EOPNOTSUPP) goto out_dput; /* Clear any inherited mode bits */ inode_lock(work->d_inode); err = ovl_do_notify_change(ofs, work, &attr); inode_unlock(work->d_inode); if (err) goto out_dput; } else { err = PTR_ERR(work); goto out_err; } return work; out_dput: dput(work); out_err: pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n", ofs->config.workdir, name, -err); return NULL; } static int ovl_check_namelen(const struct path *path, struct ovl_fs *ofs, const char *name) { struct kstatfs statfs; int err = vfs_statfs(path, &statfs); if (err) pr_err("statfs failed on '%s'\n", name); else ofs->namelen = max(ofs->namelen, statfs.f_namelen); return err; } static int ovl_lower_dir(const char *name, const struct path *path, struct ovl_fs *ofs, int *stack_depth) { int fh_type; int err; err = ovl_check_namelen(path, ofs, name); if (err) return err; *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); /* * The inodes index feature and NFS export need to encode and decode * file handles, so they require that all layers support them. */ fh_type = ovl_can_decode_fh(path->dentry->d_sb); if ((ofs->config.nfs_export || (ofs->config.index && ofs->config.upperdir)) && !fh_type) { ofs->config.index = false; ofs->config.nfs_export = false; pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n", name); } ofs->nofh |= !fh_type; /* * Decoding origin file handle is required for persistent st_ino. * Without persistent st_ino, xino=auto falls back to xino=off. */ if (ofs->config.xino == OVL_XINO_AUTO && ofs->config.upperdir && !fh_type) { ofs->config.xino = OVL_XINO_OFF; pr_warn("fs on '%s' does not support file handles, falling back to xino=off.\n", name); } /* Check if lower fs has 32bit inode numbers */ if (fh_type != FILEID_INO32_GEN) ofs->xino_mode = -1; return 0; } /* Workdir should not be subdir of upperdir and vice versa */ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) { bool ok = false; if (workdir != upperdir) { struct dentry *trap = lock_rename(workdir, upperdir); if (!IS_ERR(trap)) unlock_rename(workdir, upperdir); ok = (trap == NULL); } return ok; } static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, struct inode **ptrap, const char *name) { struct inode *trap; int err; trap = ovl_get_trap_inode(sb, dir); err = PTR_ERR_OR_ZERO(trap); if (err) { if (err == -ELOOP) pr_err("conflicting %s path\n", name); return err; } *ptrap = trap; return 0; } /* * Determine how we treat concurrent use of upperdir/workdir based on the * index feature. This is papering over mount leaks of container runtimes, * for example, an old overlay mount is leaked and now its upperdir is * attempted to be used as a lower layer in a new overlay mount. 
*/ static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) { if (ofs->config.index) { pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n", name); return -EBUSY; } else { pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n", name); return 0; } } static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, struct ovl_layer *upper_layer, const struct path *upperpath) { struct vfsmount *upper_mnt; int err; /* Upperdir path should not be r/o */ if (__mnt_is_readonly(upperpath->mnt)) { pr_err("upper fs is r/o, try multi-lower layers mount\n"); err = -EINVAL; goto out; } err = ovl_check_namelen(upperpath, ofs, ofs->config.upperdir); if (err) goto out; err = ovl_setup_trap(sb, upperpath->dentry, &upper_layer->trap, "upperdir"); if (err) goto out; upper_mnt = clone_private_mount(upperpath); err = PTR_ERR(upper_mnt); if (IS_ERR(upper_mnt)) { pr_err("failed to clone upperpath\n"); goto out; } /* Don't inherit atime flags */ upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); upper_layer->mnt = upper_mnt; upper_layer->idx = 0; upper_layer->fsid = 0; /* * Inherit SB_NOSEC flag from upperdir. * * This optimization changes behavior when a security related attribute * (suid/sgid/security.*) is changed on an underlying layer. This is * okay because we don't yet have guarantees in that case, but it will * need careful treatment once we want to honour changes to underlying * filesystems. */ if (upper_mnt->mnt_sb->s_flags & SB_NOSEC) sb->s_flags |= SB_NOSEC; if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) { ofs->upperdir_locked = true; } else { err = ovl_report_in_use(ofs, "upperdir"); if (err) goto out; } err = 0; out: return err; } /* * Returns 1 if RENAME_WHITEOUT is supported, 0 if not supported and * negative values if error is encountered. 
*/ static int ovl_check_rename_whiteout(struct ovl_fs *ofs) { struct dentry *workdir = ofs->workdir; struct dentry *temp; struct dentry *whiteout; struct name_snapshot name; struct renamedata rd = {}; char name2[OVL_TEMPNAME_SIZE]; int err; temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0)); err = PTR_ERR(temp); if (IS_ERR(temp)) return err; rd.mnt_idmap = ovl_upper_mnt_idmap(ofs); rd.old_parent = workdir; rd.new_parent = workdir; rd.flags = RENAME_WHITEOUT; ovl_tempname(name2); err = start_renaming_dentry(&rd, 0, temp, &QSTR(name2)); if (err) { dput(temp); return err; } /* Name is inline and stable - using snapshot as a copy helper */ take_dentry_name_snapshot(&name, temp); err = ovl_do_rename_rd(&rd); end_renaming(&rd); if (err) { if (err == -EINVAL) err = 0; goto cleanup_temp; } whiteout = ovl_lookup_upper_unlocked(ofs, name.name.name, workdir, name.name.len); err = PTR_ERR(whiteout); if (IS_ERR(whiteout)) goto cleanup_temp; err = ovl_upper_is_whiteout(ofs, whiteout); /* Best effort cleanup of whiteout and temp file */ if (err) ovl_cleanup(ofs, workdir, whiteout); dput(whiteout); cleanup_temp: ovl_cleanup(ofs, workdir, temp); release_dentry_name_snapshot(&name); dput(temp); return err; } static struct dentry *ovl_lookup_or_create(struct ovl_fs *ofs, struct dentry *parent, const char *name, umode_t mode) { struct dentry *child; child = ovl_start_creating_upper(ofs, parent, &QSTR(name)); if (!IS_ERR(child)) { if (!child->d_inode) child = ovl_create_real(ofs, parent, child, OVL_CATTR(mode)); end_creating_keep(child); } dput(parent); return child; } /* * Creates $workdir/work/incompat/volatile/dirty file if it is not already * present. */ static int ovl_create_volatile_dirty(struct ovl_fs *ofs) { unsigned int ctr; struct dentry *d = dget(ofs->workbasedir); static const char *const volatile_path[] = { OVL_WORKDIR_NAME, "incompat", "volatile", "dirty" }; const char *const *name = volatile_path; for (ctr = ARRAY_SIZE(volatile_path); ctr; ctr--, name++) { d = ovl_lookup_or_create(ofs, d, *name, ctr > 1 ? S_IFDIR : S_IFREG); if (IS_ERR(d)) return PTR_ERR(d); } dput(d); return 0; } static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs, const struct path *workpath) { struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *workdir; struct file *tmpfile; bool rename_whiteout; bool d_type; int fh_type; int err; err = mnt_want_write(mnt); if (err) return err; workdir = ovl_workdir_create(ofs, OVL_WORKDIR_NAME, false); err = PTR_ERR(workdir); if (IS_ERR_OR_NULL(workdir)) goto out; ofs->workdir = workdir; err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir"); if (err) goto out; /* * Upper should support d_type, else whiteouts are visible. Given * workdir and upper are on same fs, we can do iterate_dir() on * workdir. This check requires successful creation of workdir in * previous step. 
*/ err = ovl_check_d_type_supported(workpath); if (err < 0) goto out; d_type = err; if (!d_type) pr_warn("upper fs needs to support d_type.\n"); /* Check if upper/work fs supports O_TMPFILE */ tmpfile = ovl_do_tmpfile(ofs, ofs->workdir, S_IFREG | 0); ofs->tmpfile = !IS_ERR(tmpfile); if (ofs->tmpfile) fput(tmpfile); else pr_warn("upper fs does not support tmpfile.\n"); /* Check if upper/work fs supports RENAME_WHITEOUT */ err = ovl_check_rename_whiteout(ofs); if (err < 0) goto out; rename_whiteout = err; if (!rename_whiteout) pr_warn("upper fs does not support RENAME_WHITEOUT.\n"); /* * Check if upper/work fs supports (trusted|user).overlay.* xattr */ err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1); if (err) { pr_warn("failed to set xattr on upper\n"); ofs->noxattr = true; if (ovl_redirect_follow(ofs)) { ofs->config.redirect_mode = OVL_REDIRECT_NOFOLLOW; pr_warn("...falling back to redirect_dir=nofollow.\n"); } if (ofs->config.metacopy) { ofs->config.metacopy = false; pr_warn("...falling back to metacopy=off.\n"); } if (ofs->config.index) { ofs->config.index = false; pr_warn("...falling back to index=off.\n"); } if (ovl_has_fsid(ofs)) { ofs->config.uuid = OVL_UUID_NULL; pr_warn("...falling back to uuid=null.\n"); } /* * xattr support is required for persistent st_ino. * Without persistent st_ino, xino=auto falls back to xino=off. */ if (ofs->config.xino == OVL_XINO_AUTO) { ofs->config.xino = OVL_XINO_OFF; pr_warn("...falling back to xino=off.\n"); } if (err == -EPERM && !ofs->config.userxattr) pr_info("try mounting with 'userxattr' option\n"); err = 0; } else { ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE); } /* * We allowed sub-optimal upper fs configuration and don't want to break * users over kernel upgrade, but we never allowed remote upper fs, so * we can enforce strict requirements for remote upper fs. */ if (ovl_dentry_remote(ofs->workdir) && (!d_type || !rename_whiteout || ofs->noxattr)) { pr_err("upper fs missing required features.\n"); err = -EINVAL; goto out; } /* * For volatile mount, create a incompat/volatile/dirty file to keep * track of it. 
*/ if (ovl_is_volatile(&ofs->config)) { err = ovl_create_volatile_dirty(ofs); if (err < 0) { pr_err("Failed to create volatile/dirty file.\n"); goto out; } } /* Check if upper/work fs supports file handles */ fh_type = ovl_can_decode_fh(ofs->workdir->d_sb); if (ofs->config.index && !fh_type) { ofs->config.index = false; pr_warn("upper fs does not support file handles, falling back to index=off.\n"); } ofs->nofh |= !fh_type; /* Check if upper fs has 32bit inode numbers */ if (fh_type != FILEID_INO32_GEN) ofs->xino_mode = -1; /* NFS export of r/w mount depends on index */ if (ofs->config.nfs_export && !ofs->config.index) { pr_warn("NFS export requires \"index=on\", falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } out: mnt_drop_write(mnt); return err; } static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs, const struct path *upperpath, const struct path *workpath) { int err; err = -EINVAL; if (upperpath->mnt != workpath->mnt) { pr_err("workdir and upperdir must reside under the same mount\n"); return err; } if (!ovl_workdir_ok(workpath->dentry, upperpath->dentry)) { pr_err("workdir and upperdir must be separate subtrees\n"); return err; } ofs->workbasedir = dget(workpath->dentry); if (ovl_inuse_trylock(ofs->workbasedir)) { ofs->workdir_locked = true; } else { err = ovl_report_in_use(ofs, "workdir"); if (err) return err; } err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap, "workdir"); if (err) return err; return ovl_make_workdir(sb, ofs, workpath); } static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs, struct ovl_entry *oe, const struct path *upperpath) { struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *indexdir; struct dentry *origin = ovl_lowerstack(oe)->dentry; const struct ovl_fh *fh; int err; fh = ovl_get_origin_fh(ofs, origin); if (IS_ERR(fh)) return PTR_ERR(fh); err = mnt_want_write(mnt); if (err) goto out_free_fh; /* Verify lower root is upper root origin */ err = ovl_verify_origin_fh(ofs, upperpath->dentry, fh, true); if (err) { pr_err("failed to verify upper root origin\n"); goto out; } /* index dir will act also as workdir */ iput(ofs->workdir_trap); ofs->workdir_trap = NULL; dput(ofs->workdir); ofs->workdir = NULL; indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true); if (IS_ERR(indexdir)) { err = PTR_ERR(indexdir); } else if (indexdir) { ofs->workdir = indexdir; err = ovl_setup_trap(sb, indexdir, &ofs->workdir_trap, "indexdir"); if (err) goto out; /* * Verify upper root is exclusively associated with index dir. * Older kernels stored upper fh in ".overlay.origin" * xattr. If that xattr exists, verify that it is a match to * upper dir file handle. In any case, verify or set xattr * ".overlay.upper" to indicate that index may have * directory entries. 
*/ if (ovl_check_origin_xattr(ofs, indexdir)) { err = ovl_verify_origin_xattr(ofs, indexdir, OVL_XATTR_ORIGIN, upperpath->dentry, true, false); if (err) pr_err("failed to verify index dir 'origin' xattr\n"); } err = ovl_verify_upper(ofs, indexdir, upperpath->dentry, true); if (err) pr_err("failed to verify index dir 'upper' xattr\n"); /* Cleanup bad/stale/orphan index entries */ if (!err) err = ovl_indexdir_cleanup(ofs); } if (err || !indexdir) pr_warn("try deleting index dir or mounting with '-o index=off' to disable inodes index.\n"); out: mnt_drop_write(mnt); out_free_fh: kfree(fh); return err; } static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) { unsigned int i; if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs)) return true; /* * We allow using single lower with null uuid for index and nfs_export * for example to support those features with single lower squashfs. * To avoid regressions in setups of overlay with re-formatted lower * squashfs, do not allow decoding origin with lower null uuid unless * user opted-in to one of the new features that require following the * lower inode of non-dir upper. */ if (ovl_allow_offline_changes(ofs) && uuid_is_null(uuid)) return false; for (i = 0; i < ofs->numfs; i++) { /* * We use uuid to associate an overlay lower file handle with a * lower layer, so we can accept lower fs with null uuid as long * as all lower layers with null uuid are on the same fs. * if we detect multiple lower fs with the same uuid, we * disable lower file handle decoding on all of them. */ if (ofs->fs[i].is_lower && ovl_uuid_match(ofs, ofs->fs[i].sb, uuid)) { ofs->fs[i].bad_uuid = true; return false; } } return true; } /* Get a unique fsid for the layer */ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) { struct super_block *sb = path->mnt->mnt_sb; const uuid_t *uuid = ovl_origin_uuid(ofs) ? &sb->s_uuid : &uuid_null; unsigned int i; dev_t dev; int err; bool bad_uuid = false; bool warn = false; for (i = 0; i < ofs->numfs; i++) { if (ofs->fs[i].sb == sb) return i; } if (!ovl_lower_uuid_ok(ofs, uuid)) { bad_uuid = true; if (ofs->config.xino == OVL_XINO_AUTO) { ofs->config.xino = OVL_XINO_OFF; warn = true; } if (ofs->config.index || ofs->config.nfs_export) { ofs->config.index = false; ofs->config.nfs_export = false; warn = true; } if (warn) { pr_warn("%s uuid in non-single lower fs '%pd2', falling back to xino=%s,index=off,nfs_export=off.\n", uuid_is_null(uuid) ? "null" : "conflicting", path->dentry, ovl_xino_mode(&ofs->config)); } } err = get_anon_bdev(&dev); if (err) { pr_err("failed to get anonymous bdev for lowerpath\n"); return err; } ofs->fs[ofs->numfs].sb = sb; ofs->fs[ofs->numfs].pseudo_dev = dev; ofs->fs[ofs->numfs].bad_uuid = bad_uuid; return ofs->numfs++; } /* * The fsid after the last lower fsid is used for the data layers. * It is a "null fs" with a null sb, null uuid, and no pseudo dev. 
*/ static int ovl_get_data_fsid(struct ovl_fs *ofs) { return ofs->numfs; } /* * Set the ovl sb encoding as the same one used by the first layer */ static int ovl_set_encoding(struct super_block *sb, struct super_block *fs_sb) { if (!sb_has_encoding(fs_sb)) return 0; #if IS_ENABLED(CONFIG_UNICODE) if (sb_has_strict_encoding(fs_sb)) { pr_err("strict encoding not supported\n"); return -EINVAL; } sb->s_encoding = fs_sb->s_encoding; sb->s_encoding_flags = fs_sb->s_encoding_flags; #endif return 0; } static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, struct ovl_fs_context *ctx, struct ovl_layer *layers) { int err; unsigned int i; size_t nr_merged_lower; ofs->fs = kzalloc_objs(struct ovl_sb, ctx->nr + 2); if (ofs->fs == NULL) return -ENOMEM; /* * idx/fsid 0 are reserved for upper fs even with lower only overlay * and the last fsid is reserved for "null fs" of the data layers. */ ofs->numfs++; /* * All lower layers that share the same fs as upper layer, use the same * pseudo_dev as upper layer. Allocate fs[0].pseudo_dev even for lower * only overlay to simplify ovl_fs_free(). * is_lower will be set if upper fs is shared with a lower layer. */ err = get_anon_bdev(&ofs->fs[0].pseudo_dev); if (err) { pr_err("failed to get anonymous bdev for upper fs\n"); return err; } if (ovl_upper_mnt(ofs)) { ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb; ofs->fs[0].is_lower = false; if (ofs->casefold) { err = ovl_set_encoding(sb, ofs->fs[0].sb); if (err) return err; } } nr_merged_lower = ctx->nr - ctx->nr_data; for (i = 0; i < ctx->nr; i++) { struct ovl_fs_context_layer *l = &ctx->lower[i]; struct vfsmount *mnt; struct inode *trap; int fsid; if (i < nr_merged_lower) fsid = ovl_get_fsid(ofs, &l->path); else fsid = ovl_get_data_fsid(ofs); if (fsid < 0) return fsid; /* * Check if lower root conflicts with this overlay layers before * checking if it is in-use as upperdir/workdir of "another" * mount, because we do not bother to check in ovl_is_inuse() if * the upperdir/workdir is in fact in-use by our * upperdir/workdir. */ err = ovl_setup_trap(sb, l->path.dentry, &trap, "lowerdir"); if (err) return err; if (ovl_is_inuse(l->path.dentry)) { err = ovl_report_in_use(ofs, "lowerdir"); if (err) { iput(trap); return err; } } mnt = clone_private_mount(&l->path); err = PTR_ERR(mnt); if (IS_ERR(mnt)) { pr_err("failed to clone lowerpath\n"); iput(trap); return err; } /* * Make lower layers R/O. That way fchmod/fchown on lower file * will fail instead of modifying lower fs. */ mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME; layers[ofs->numlayer].trap = trap; layers[ofs->numlayer].mnt = mnt; layers[ofs->numlayer].idx = ofs->numlayer; layers[ofs->numlayer].fsid = fsid; layers[ofs->numlayer].fs = &ofs->fs[fsid]; /* Store for printing lowerdir=... in ovl_show_options() */ ofs->config.lowerdirs[ofs->numlayer] = l->name; l->name = NULL; ofs->numlayer++; ofs->fs[fsid].is_lower = true; if (ofs->casefold) { if (!ovl_upper_mnt(ofs) && !sb_has_encoding(sb)) { err = ovl_set_encoding(sb, ofs->fs[fsid].sb); if (err) return err; } if (!sb_same_encoding(sb, mnt->mnt_sb)) { pr_err("all layers must have the same encoding\n"); return -EINVAL; } } } /* * When all layers on same fs, overlay can use real inode numbers. * With mount option "xino=<on|auto>", mounter declares that there are * enough free high bits in underlying fs to hold the unique fsid. 
* If overlayfs does encounter underlying inodes using the high xino * bits reserved for fsid, it emits a warning and uses the original * inode number or a non persistent inode number allocated from a * dedicated range. */ if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) { if (ofs->config.xino == OVL_XINO_ON) pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n"); ofs->xino_mode = 0; } else if (ofs->config.xino == OVL_XINO_OFF) { ofs->xino_mode = -1; } else if (ofs->xino_mode < 0) { /* * This is a roundup of number of bits needed for encoding * fsid, where fsid 0 is reserved for upper fs (even with * lower only overlay) +1 extra bit is reserved for the non * persistent inode number range that is used for resolving * xino lower bits overflow. */ BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 30); ofs->xino_mode = ilog2(ofs->numfs - 1) + 2; } if (ofs->xino_mode > 0) { pr_info("\"xino\" feature enabled using %d upper inode bits.\n", ofs->xino_mode); } return 0; } static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, struct ovl_fs_context *ctx, struct ovl_fs *ofs, struct ovl_layer *layers) { int err; unsigned int i; size_t nr_merged_lower; struct ovl_entry *oe; struct ovl_path *lowerstack; struct ovl_fs_context_layer *l; if (!ofs->config.upperdir && ctx->nr == 1) { pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n"); return ERR_PTR(-EINVAL); } if (ctx->nr == ctx->nr_data) { pr_err("at least one non-data lowerdir is required\n"); return ERR_PTR(-EINVAL); } err = -EINVAL; for (i = 0; i < ctx->nr; i++) { l = &ctx->lower[i]; err = ovl_lower_dir(l->name, &l->path, ofs, &sb->s_stack_depth); if (err) return ERR_PTR(err); } err = -EINVAL; sb->s_stack_depth++; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("maximum fs stacking depth exceeded\n"); return ERR_PTR(err); } err = ovl_get_layers(sb, ofs, ctx, layers); if (err) return ERR_PTR(err); err = -ENOMEM; /* Data-only layers are not merged in root directory */ nr_merged_lower = ctx->nr - ctx->nr_data; oe = ovl_alloc_entry(nr_merged_lower); if (!oe) return ERR_PTR(err); lowerstack = ovl_lowerstack(oe); for (i = 0; i < nr_merged_lower; i++) { l = &ctx->lower[i]; lowerstack[i].dentry = dget(l->path.dentry); lowerstack[i].layer = &ofs->layers[i + 1]; } ofs->numdatalayer = ctx->nr_data; return oe; } /* * Check if this layer root is a descendant of: * - another layer of this overlayfs instance * - upper/work dir of any overlayfs instance */ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs, struct dentry *dentry, const char *name, bool is_lower) { struct dentry *next = dentry, *parent; int err = 0; if (!dentry) return 0; parent = dget_parent(next); /* Walk back ancestors to root (inclusive) looking for traps */ while (!err && parent != next) { if (is_lower && ovl_lookup_trap_inode(sb, parent)) { err = -ELOOP; pr_err("overlapping %s path\n", name); } else if (ovl_is_inuse(parent)) { err = ovl_report_in_use(ofs, name); } next = parent; parent = dget_parent(next); dput(next); } dput(parent); return err; } /* * Check if any of the layers or work dirs overlap. */ static int ovl_check_overlapping_layers(struct super_block *sb, struct ovl_fs *ofs) { int i, err; if (ovl_upper_mnt(ofs)) { err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root, "upperdir", false); if (err) return err; /* * Checking workbasedir avoids hitting ovl_is_inuse(parent) of * this instance and covers overlapping work and index dirs, * unless work or index dir have been moved since created inside * workbasedir. 
In that case, we already have their traps in * inode cache and we will catch that case on lookup. */ err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir", false); if (err) return err; } for (i = 1; i < ofs->numlayer; i++) { err = ovl_check_layer(sb, ofs, ofs->layers[i].mnt->mnt_root, "lowerdir", true); if (err) return err; } return 0; } static struct dentry *ovl_get_root(struct super_block *sb, struct dentry *upperdentry, struct ovl_entry *oe) { struct dentry *root; struct ovl_fs *ofs = OVL_FS(sb); struct ovl_path *lowerpath = ovl_lowerstack(oe); unsigned long ino = d_inode(lowerpath->dentry)->i_ino; int fsid = lowerpath->layer->fsid; struct ovl_inode_params oip = { .upperdentry = upperdentry, .oe = oe, }; root = d_make_root(ovl_new_inode(sb, S_IFDIR, 0)); if (!root) return NULL; if (upperdentry) { /* Root inode uses upper st_ino/i_ino */ ino = d_inode(upperdentry)->i_ino; fsid = 0; ovl_dentry_set_upper_alias(root); if (ovl_is_impuredir(sb, upperdentry)) ovl_set_flag(OVL_IMPURE, d_inode(root)); } /* Look for xwhiteouts marker except in the lowermost layer */ for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) { struct path path = { .mnt = lowerpath->layer->mnt, .dentry = lowerpath->dentry, }; /* overlay.opaque=x means xwhiteouts directory */ if (ovl_get_opaquedir_val(ofs, &path) == 'x') { ovl_layer_set_xwhiteouts(ofs, lowerpath->layer); ovl_dentry_set_xwhiteouts(root); } } /* Root is always merge -> can have whiteouts */ ovl_set_flag(OVL_WHITEOUTS, d_inode(root)); ovl_dentry_set_flag(OVL_E_CONNECTED, root); ovl_set_upperdata(d_inode(root)); ovl_inode_init(d_inode(root), &oip, ino, fsid); WARN_ON(!!IS_CASEFOLDED(d_inode(root)) != ofs->casefold); ovl_dentry_init_flags(root, upperdentry, oe, DCACHE_OP_WEAK_REVALIDATE); /* root keeps a reference of upperdentry */ dget(upperdentry); return root; } static void ovl_set_d_op(struct super_block *sb) { #if IS_ENABLED(CONFIG_UNICODE) struct ovl_fs *ofs = sb->s_fs_info; if (ofs->casefold) { set_default_d_op(sb, &ovl_dentry_ci_operations); return; } #endif set_default_d_op(sb, &ovl_dentry_operations); } static int ovl_fill_super_creds(struct fs_context *fc, struct super_block *sb) { struct ovl_fs *ofs = sb->s_fs_info; struct cred *creator_cred = (struct cred *)ofs->creator_cred; struct ovl_fs_context *ctx = fc->fs_private; struct ovl_layer *layers; struct ovl_entry *oe = NULL; int err; err = ovl_fs_params_verify(ctx, &ofs->config); if (err) return err; err = -EINVAL; if (ctx->nr == 0) { if (!(fc->sb_flags & SB_SILENT)) pr_err("missing 'lowerdir'\n"); return err; } err = -ENOMEM; layers = kzalloc_objs(struct ovl_layer, ctx->nr + 1); if (!layers) return err; ofs->config.lowerdirs = kcalloc(ctx->nr + 1, sizeof(char *), GFP_KERNEL); if (!ofs->config.lowerdirs) { kfree(layers); return err; } ofs->layers = layers; /* * Layer 0 is reserved for upper even if there's no upper. * config.lowerdirs[0] is used for storing the user provided colon * separated lowerdir string. 
*/ ofs->config.lowerdirs[0] = ctx->lowerdir_all; ctx->lowerdir_all = NULL; ofs->numlayer = 1; sb->s_stack_depth = 0; sb->s_maxbytes = MAX_LFS_FILESIZE; atomic_long_set(&ofs->last_ino, 1); /* Assume underlying fs uses 32bit inodes unless proven otherwise */ if (ofs->config.xino != OVL_XINO_OFF) { ofs->xino_mode = BITS_PER_LONG - 32; if (!ofs->xino_mode) { pr_warn("xino not supported on 32bit kernel, falling back to xino=off.\n"); ofs->config.xino = OVL_XINO_OFF; } } /* alloc/destroy_inode needed for setting up traps in inode cache */ sb->s_op = &ovl_super_operations; if (ofs->config.upperdir) { struct super_block *upper_sb; err = -EINVAL; if (!ofs->config.workdir) { pr_err("missing 'workdir'\n"); return err; } err = ovl_get_upper(sb, ofs, &layers[0], &ctx->upper); if (err) return err; upper_sb = ovl_upper_mnt(ofs)->mnt_sb; if (!ovl_should_sync(ofs)) { ofs->errseq = errseq_sample(&upper_sb->s_wb_err); if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) { err = -EIO; pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n"); return err; } } err = ovl_get_workdir(sb, ofs, &ctx->upper, &ctx->work); if (err) return err; if (!ofs->workdir) sb->s_flags |= SB_RDONLY; sb->s_stack_depth = upper_sb->s_stack_depth; sb->s_time_gran = upper_sb->s_time_gran; } oe = ovl_get_lowerstack(sb, ctx, ofs, layers); err = PTR_ERR(oe); if (IS_ERR(oe)) return err; /* If the upper fs is nonexistent, we mark overlayfs r/o too */ if (!ovl_upper_mnt(ofs)) sb->s_flags |= SB_RDONLY; if (ovl_has_fsid(ofs) && ovl_upper_mnt(ofs)) { /* Use per instance persistent uuid/fsid */ ovl_init_uuid_xattr(sb, ofs, &ctx->upper); } if (!ovl_force_readonly(ofs) && ofs->config.index) { err = ovl_get_indexdir(sb, ofs, oe, &ctx->upper); if (err) goto out_free_oe; /* Force r/o mount with no index dir */ if (!ofs->workdir) sb->s_flags |= SB_RDONLY; } err = ovl_check_overlapping_layers(sb, ofs); if (err) goto out_free_oe; /* Show index=off in /proc/mounts for forced r/o mount */ if (!ofs->workdir) { ofs->config.index = false; if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) { pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } } if (ofs->config.metacopy && ofs->config.nfs_export) { pr_warn("NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } /* * Support encoding decodable file handles with nfs_export=on * and encoding non-decodable file handles with nfs_export=off * if all layers support file handles. */ if (ofs->config.nfs_export) sb->s_export_op = &ovl_export_operations; else if (!ofs->nofh) sb->s_export_op = &ovl_export_fid_operations; /* Never override disk quota limits or use reserved space */ cap_lower(creator_cred->cap_effective, CAP_SYS_RESOURCE); sb->s_magic = OVERLAYFS_SUPER_MAGIC; sb->s_xattr = ovl_xattr_handlers(ofs); sb->s_fs_info = ofs; #ifdef CONFIG_FS_POSIX_ACL sb->s_flags |= SB_POSIXACL; #endif sb->s_iflags |= SB_I_SKIP_SYNC; /* * Ensure that umask handling is done by the filesystems used * for the the upper layer instead of overlayfs as that would * lead to unexpected results. 
*/ sb->s_iflags |= SB_I_NOUMASK; sb->s_iflags |= SB_I_EVM_HMAC_UNSUPPORTED; err = -ENOMEM; sb->s_root = ovl_get_root(sb, ctx->upper.dentry, oe); if (!sb->s_root) goto out_free_oe; return 0; out_free_oe: ovl_free_entry(oe); return err; } int ovl_fill_super(struct super_block *sb, struct fs_context *fc) { struct ovl_fs *ofs = sb->s_fs_info; int err; err = -EIO; if (WARN_ON(fc->user_ns != current_user_ns())) goto out_err; ovl_set_d_op(sb); if (!ofs->creator_cred) { err = -ENOMEM; ofs->creator_cred = prepare_creds(); if (!ofs->creator_cred) goto out_err; } with_ovl_creds(sb) err = ovl_fill_super_creds(fc, sb); out_err: if (err) { ovl_free_fs(ofs); sb->s_fs_info = NULL; } return err; } struct file_system_type ovl_fs_type = { .owner = THIS_MODULE, .name = "overlay", .init_fs_context = ovl_init_fs_context, .parameters = ovl_parameter_spec, .fs_flags = FS_USERNS_MOUNT, .kill_sb = kill_anon_super, }; MODULE_ALIAS_FS("overlay"); static void ovl_inode_init_once(void *foo) { struct ovl_inode *oi = foo; inode_init_once(&oi->vfs_inode); } static int __init ovl_init(void) { int err; ovl_inode_cachep = kmem_cache_create("ovl_inode", sizeof(struct ovl_inode), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_ACCOUNT), ovl_inode_init_once); if (ovl_inode_cachep == NULL) return -ENOMEM; err = register_filesystem(&ovl_fs_type); if (!err) return 0; kmem_cache_destroy(ovl_inode_cachep); return err; } static void __exit ovl_exit(void) { unregister_filesystem(&ovl_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(ovl_inode_cachep); } module_init(ovl_init); module_exit(ovl_exit);
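Editor's note: a short standalone illustration of the xino bit arithmetic used in ovl_get_layers() above (xino_mode = ilog2(numfs - 1) + 2, where fsid 0 is reserved for the upper fs and one extra bit covers the non-persistent inode range). demo_ilog2() is a hypothetical stand-in for the kernel's ilog2(); this is not overlayfs code.

#include <stdio.h>

static int demo_ilog2(unsigned int n)	/* floor(log2(n)), n > 0 */
{
	int bits = -1;

	while (n) {
		n >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	unsigned int numfs = 3;	/* e.g. one upper fs plus two distinct lower fs */
	int xino_bits = demo_ilog2(numfs - 1) + 2;

	printf("numfs=%u -> %d high inode bits reserved for fsid\n",
	       numfs, xino_bits);
	return 0;
}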
// SPDX-License-Identifier: GPL-2.0-only /* * AppArmor security module * * This file contains AppArmor function for pathnames * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. */ #include <linux/magic.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/nsproxy.h> #include <linux/path.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/fs_struct.h> #include "include/apparmor.h" #include "include/path.h" #include "include/policy.h" /* modified from dcache.c */ static int prepend(char **buffer, int buflen, const char *str, int namelen) { buflen -= namelen; if (buflen < 0) return -ENAMETOOLONG; *buffer -= namelen; memcpy(*buffer, str, namelen); return 0; } #define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT) /* If the path is not connected to the expected root, * check if it is a sysctl and handle specially else remove any * leading / that __d_path may have returned. * Unless * specifically directed to connect the path, * OR * if in a chroot and doing chroot relative paths and the path * resolves to the namespace root (would be connected outside * of chroot) and specifically directed to connect paths to * namespace root. */ static int disconnect(const struct path *path, char *buf, char **name, int flags, const char *disconnected) { int error = 0; if (!(flags & PATH_CONNECT_PATH) && !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && our_mnt(path->mnt))) { /* disconnected path, don't return pathname starting * with '/' */ error = -EACCES; if (**name == '/') *name = *name + 1; } else { if (**name != '/') /* CONNECT_PATH with missing root */ error = prepend(name, *name - buf, "/", 1); if (!error && disconnected) error = prepend(name, *name - buf, disconnected, strlen(disconnected)); } return error; } /** * d_namespace_path - lookup a name associated with a given path * @path: path to lookup (NOT NULL) * @buf: buffer to store path to (NOT NULL) * @name: Returns - pointer for start of path name with in @buf (NOT NULL) * @flags: flags controlling path lookup * @disconnected: string to prefix to disconnected paths * * Handle path name lookup. * * Returns: %0 else error code if path lookup fails * When no error the path name is returned in @name which points to * a position in @buf */ static int d_namespace_path(const struct path *path, char *buf, char **name, int flags, const char *disconnected) { char *res; int error = 0; int connected = 1; int isdir = (flags & PATH_IS_DIR) ?
1 : 0; int buflen = aa_g_path_max - isdir; if (path->mnt->mnt_flags & MNT_INTERNAL) { /* it's not mounted anywhere */ res = dentry_path(path->dentry, buf, buflen); *name = res; if (IS_ERR(res)) { *name = buf; return PTR_ERR(res); } if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC && strncmp(*name, "/sys/", 5) == 0) { /* TODO: convert over to using a per namespace * control instead of hard coded /proc */ error = prepend(name, *name - buf, "/proc", 5); goto out; } else error = disconnect(path, buf, name, flags, disconnected); goto out; } /* resolve paths relative to chroot?*/ if (flags & PATH_CHROOT_REL) { struct path root; get_fs_root(current->fs, &root); res = __d_path(path, &root, buf, buflen); path_put(&root); } else { res = d_absolute_path(path, buf, buflen); if (!our_mnt(path->mnt)) connected = 0; } /* handle error conditions - and still allow a partial path to * be returned. */ if (IS_ERR_OR_NULL(res)) { if (PTR_ERR(res) == -ENAMETOOLONG) { error = -ENAMETOOLONG; *name = buf; goto out; } connected = 0; res = dentry_path_raw(path->dentry, buf, buflen); if (IS_ERR(res)) { error = PTR_ERR(res); *name = buf; goto out; } } else if (!our_mnt(path->mnt)) connected = 0; *name = res; if (!connected) error = disconnect(path, buf, name, flags, disconnected); /* Handle two cases: * 1. A deleted dentry && profile is not allowing mediation of deleted * 2. On some filesystems, newly allocated dentries appear to the * security_path hooks as a deleted dentry except without an inode * allocated. */ if (d_unlinked(path->dentry) && d_is_positive(path->dentry) && !(flags & (PATH_MEDIATE_DELETED | PATH_DELEGATE_DELETED))) { error = -ENOENT; goto out; } out: /* Append "/" to directory paths, except for root "/" which * already ends in a slash. */ if (!error && isdir) { bool is_root = (*name)[0] == '/' && (*name)[1] == '\0'; if (!is_root) buf[aa_g_path_max - 2] = '/'; } return error; } /** * aa_path_name - get the pathname to a buffer ensure dir / is appended * @path: path the file (NOT NULL) * @flags: flags controlling path name generation * @buffer: buffer to put name in (NOT NULL) * @name: Returns - the generated path name if !error (NOT NULL) * @info: Returns - information on why the path lookup failed (MAYBE NULL) * @disconnected: string to prepend to disconnected paths * * @name is a pointer to the beginning of the pathname (which usually differs * from the beginning of the buffer), or NULL. If there is an error @name * may contain a partial or invalid name that can be used for audit purposes, * but it can not be used for mediation. * * We need PATH_IS_DIR to indicate whether the file is a directory or not * because the file may not yet exist, and so we cannot check the inode's * file type. * * Returns: %0 else error code if could retrieve name */ int aa_path_name(const struct path *path, int flags, char *buffer, const char **name, const char **info, const char *disconnected) { char *str = NULL; int error = d_namespace_path(path, buffer, &str, flags, disconnected); if (info && error) { if (error == -ENOENT) *info = "Failed name lookup - deleted entry"; else if (error == -EACCES) *info = "Failed name lookup - disconnected path"; else if (error == -ENAMETOOLONG) *info = "Failed name lookup - name too long"; else *info = "Failed name lookup"; } *name = str; return error; }
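Editor's note: a self-contained sketch of how the prepend() helper above assembles a name right to left in a fixed buffer, the same way d_namespace_path() later prepends "/proc", "/" or a disconnected-path prefix. Buffer size and strings are arbitrary illustration values.

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int demo_prepend(char **buffer, int buflen, const char *str, int namelen)
{
	buflen -= namelen;
	if (buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

int main(void)
{
	char buf[64];
	char *name = buf + sizeof(buf) - 1;	/* build backwards from the end */

	*name = '\0';
	demo_prepend(&name, name - buf, "file", 4);
	demo_prepend(&name, name - buf, "/", 1);
	demo_prepend(&name, name - buf, "/proc", 5);
	printf("%s\n", name);			/* prints "/proc/file" */
	return 0;
}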
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_BIO_INTEGRITY_H #define _LINUX_BIO_INTEGRITY_H #include <linux/bio.h> enum bip_flags { BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ BIP_DISK_NOCHECK = 1 << 2, /* disable disk integrity checking */ BIP_IP_CHECKSUM = 1 << 3, /* IP checksum */ BIP_COPY_USER = 1 << 4, /* Kernel bounce buffer in use */ BIP_CHECK_GUARD = 1 << 5, /* guard check */ BIP_CHECK_REFTAG = 1 << 6, /* reftag check */ BIP_CHECK_APPTAG = 1 << 7, /* apptag check */ BIP_MEMPOOL = 1 << 15, /* buffer backed by mempool */ }; struct bio_integrity_payload { struct bvec_iter bip_iter; unsigned short bip_vcnt; /* # of integrity bio_vecs */ unsigned short bip_max_vcnt; /* integrity bio_vec slots */ unsigned short bip_flags; /* control flags */ u16 app_tag; /* application tag value */ struct bio_vec *bip_vec; }; #define BIP_CLONE_FLAGS (BIP_MAPPED_INTEGRITY | BIP_IP_CHECKSUM | \ BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG) #ifdef CONFIG_BLK_DEV_INTEGRITY #define bip_for_each_vec(bvl, bip, iter) \ for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) #define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ for_each_bio(_bio) \ bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) { if (bio->bi_opf & REQ_INTEGRITY) return bio->bi_integrity; return NULL; } static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) { struct bio_integrity_payload *bip = bio_integrity(bio); if (bip) return bip->bip_flags & flag; return false; } static inline sector_t bip_get_seed(struct bio_integrity_payload *bip) { return bip->bip_iter.bi_sector; } static inline void bip_set_seed(struct bio_integrity_payload *bip, sector_t seed) { bip->bip_iter.bi_sector = seed; } void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip, struct bio_vec *bvecs, unsigned int nr_vecs); struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr); int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset); int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter); int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta); void bio_integrity_unmap_user(struct bio *bio); bool bio_integrity_prep(struct bio *bio); void bio_integrity_advance(struct bio *bio, unsigned int bytes_done); void bio_integrity_trim(struct bio *bio); int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask); #else /* CONFIG_BLK_DEV_INTEGRITY */ static inline struct bio_integrity_payload *bio_integrity(struct bio *bio) { return NULL; } static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter) { return -EINVAL; } static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta) { return -EINVAL; } static inline void bio_integrity_unmap_user(struct bio *bio) { } static inline bool bio_integrity_prep(struct bio *bio) {
return true; } static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask) { return 0; } static inline void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) { } static inline void bio_integrity_trim(struct bio *bio) { } static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag) { return false; } static inline struct bio_integrity_payload * bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr) { return ERR_PTR(-EINVAL); } static inline int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { return 0; } #endif /* CONFIG_BLK_DEV_INTEGRITY */ void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer); void bio_integrity_free_buf(struct bio_integrity_payload *bip); #endif /* _LINUX_BIO_INTEGRITY_H */
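To make the API above concrete, here is a hedged sketch of a producer attaching one page of protection information to a bio before submission; the flag choice, GFP mask, and error handling are illustrative assumptions rather than a copy of any in-tree caller.

/* Sketch only: attach integrity metadata covering this bio. */
static int example_attach_integrity(struct bio *bio, struct page *meta_page,
				    unsigned int meta_len)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);	/* room for one integrity bvec */
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	bip->bip_flags |= BIP_CHECK_GUARD | BIP_CHECK_REFTAG;
	bip_set_seed(bip, bio->bi_iter.bi_sector);	/* ref tag seeded from start sector */

	if (bio_integrity_add_page(bio, meta_page, meta_len, 0) != meta_len)
		return -ENOMEM;	/* vector table full or metadata didn't fit */

	return 0;
}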
// SPDX-License-Identifier: GPL-2.0-or-later /* -*- linux-c -*- * INET 802.1Q VLAN * Ethernet-type device handling. * * Authors: Ben Greear <greearb@candelatech.com> * Please send support related email to: netdev@vger.kernel.org * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html * * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com> * - reset skb->pkt_type on incoming packets when MAC was changed * - see that changed MAC is saddr for outgoing packets * Oct 20, 2001: Ard van Breeman: * - Fix MC-list, finally. * - Flush MC-list on VLAN destroy. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/net_tstamp.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <net/arp.h> #include <net/macsec.h> #include <net/netdev_lock.h> #include "vlan.h" #include "vlanproc.h" #include <linux/if_vlan.h> #include <linux/netpoll.h> /* * Create the VLAN header for an arbitrary protocol layer * * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp) * * This is called when the SKB is moving down the stack towards the * physical devices. */ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_hdr *vhdr; unsigned int vhdrlen = 0; u16 vlan_tci = 0; int rc; if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) { vhdr = skb_push(skb, VLAN_HLEN); vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); vhdr->h_vlan_TCI = htons(vlan_tci); /* * Set the protocol type. For a packet of type ETH_P_802_3/2 we * put the length in here instead.
*/ if (type != ETH_P_802_3 && type != ETH_P_802_2) vhdr->h_vlan_encapsulated_proto = htons(type); else vhdr->h_vlan_encapsulated_proto = htons(len); skb->protocol = vlan->vlan_proto; type = ntohs(vlan->vlan_proto); vhdrlen = VLAN_HLEN; } /* Before delegating work to the lower layer, enter our MAC-address */ if (saddr == NULL) saddr = dev->dev_addr; /* Now make the underlying real hard header */ dev = vlan->real_dev; rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen); if (rc > 0) rc += vhdrlen; return rc; } static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb) { #ifdef CONFIG_NET_POLL_CONTROLLER return netpoll_send_skb(vlan->netpoll, skb); #else BUG(); return NETDEV_TX_OK; #endif } static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); unsigned int len; int ret; /* Handle non-VLAN frames if they are sent to us, for example by DHCP. * * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... */ if (vlan->flags & VLAN_FLAG_REORDER_HDR || veth->h_vlan_proto != vlan->vlan_proto) { u16 vlan_tci; vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci); } skb->dev = vlan->real_dev; len = skb->len; if (unlikely(netpoll_tx_running(dev))) return vlan_netpoll_send_skb(vlan, skb); ret = dev_queue_xmit(skb); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *stats; stats = this_cpu_ptr(vlan->vlan_pcpu_stats); u64_stats_update_begin(&stats->syncp); u64_stats_inc(&stats->tx_packets); u64_stats_add(&stats->tx_bytes, len); u64_stats_update_end(&stats->syncp); } else { this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped); } return ret; } static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; unsigned int max_mtu = real_dev->mtu; if (netif_reduces_vlan_mtu(real_dev)) max_mtu -= VLAN_HLEN; if (max_mtu < new_mtu) return -ERANGE; WRITE_ONCE(dev->mtu, new_mtu); return 0; } void vlan_dev_set_ingress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio) vlan->nr_ingress_mappings--; else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio) vlan->nr_ingress_mappings++; vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio; } int vlan_dev_set_egress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_priority_tci_mapping *mp = NULL; struct vlan_priority_tci_mapping *np; u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; /* See if a priority mapping exists.. */ mp = vlan->egress_priority_map[skb_prio & 0xF]; while (mp) { if (mp->priority == skb_prio) { if (mp->vlan_qos && !vlan_qos) vlan->nr_egress_mappings--; else if (!mp->vlan_qos && vlan_qos) vlan->nr_egress_mappings++; mp->vlan_qos = vlan_qos; return 0; } mp = mp->next; } /* Create a new mapping then. */ mp = vlan->egress_priority_map[skb_prio & 0xF]; np = kmalloc_obj(struct vlan_priority_tci_mapping); if (!np) return -ENOBUFS; np->next = mp; np->priority = skb_prio; np->vlan_qos = vlan_qos; /* Before inserting this element in hash table, make sure all its fields * are committed to memory. 
* coupled with smp_rmb() in vlan_dev_get_egress_qos_mask() */ smp_wmb(); vlan->egress_priority_map[skb_prio & 0xF] = np; if (vlan_qos) vlan->nr_egress_mappings++; return 0; } /* Flags are defined in the vlan_flags enum in * include/uapi/linux/if_vlan.h file. */ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); u32 old_flags = vlan->flags; if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP | VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP | VLAN_FLAG_BRIDGE_BINDING)) return -EINVAL; vlan->flags = (old_flags & ~mask) | (flags & mask); if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) { if (vlan->flags & VLAN_FLAG_GVRP) vlan_gvrp_request_join(dev); else vlan_gvrp_request_leave(dev); } if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) { if (vlan->flags & VLAN_FLAG_MVRP) vlan_mvrp_request_join(dev); else vlan_mvrp_request_leave(dev); } return 0; } void vlan_dev_get_realdev_name(const struct net_device *dev, char *result, size_t size) { strscpy_pad(result, vlan_dev_priv(dev)->real_dev->name, size); } bool vlan_dev_inherit_address(struct net_device *dev, struct net_device *real_dev) { if (dev->addr_assign_type != NET_ADDR_STOLEN) return false; eth_hw_addr_set(dev, real_dev->dev_addr); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return true; } static int vlan_dev_open(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; int err; if (!(real_dev->flags & IFF_UP) && !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) return -ENETDOWN; if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) && !vlan_dev_inherit_address(dev, real_dev)) { err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) goto out; } ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr); if (vlan->flags & VLAN_FLAG_GVRP) vlan_gvrp_request_join(dev); if (vlan->flags & VLAN_FLAG_MVRP) vlan_mvrp_request_join(dev); if (netif_carrier_ok(real_dev) && !(vlan->flags & VLAN_FLAG_BRIDGE_BINDING)) netif_carrier_on(dev); return 0; out: netif_carrier_off(dev); return err; } static int vlan_dev_stop(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING)) netif_carrier_off(dev); return 0; } static int vlan_dev_set_mac_address(struct net_device *dev, void *p) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; struct sockaddr *addr = p; int err; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (!(dev->flags & IFF_UP)) goto out; if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) { err = dev_uc_add(real_dev, addr->sa_data); if (err < 0) return err; } if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: eth_hw_addr_set(dev, addr->sa_data); return 0; } static int vlan_hwtstamp_get(struct net_device *dev, struct kernel_hwtstamp_config *cfg) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; return generic_hwtstamp_get_lower(real_dev, cfg); } static int vlan_hwtstamp_set(struct net_device *dev, struct kernel_hwtstamp_config *cfg, struct netlink_ext_ack *extack) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; if (!net_eq(dev_net(dev), dev_net(real_dev))) return -EOPNOTSUPP; return 
generic_hwtstamp_set_lower(real_dev, cfg, extack); } static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; struct ifreq ifrr; int err = -EOPNOTSUPP; strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ); ifrr.ifr_ifru = ifr->ifr_ifru; switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: err = dev_eth_ioctl(real_dev, &ifrr, cmd); break; } if (!err) ifr->ifr_ifru = ifrr.ifr_ifru; return err; } static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int err = 0; if (netif_device_present(real_dev) && ops->ndo_neigh_setup) err = ops->ndo_neigh_setup(real_dev, pa); return err; } #if IS_ENABLED(CONFIG_FCOE) static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid, struct scatterlist *sgl, unsigned int sgc) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = 0; if (ops->ndo_fcoe_ddp_setup) rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc); return rc; } static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int len = 0; if (ops->ndo_fcoe_ddp_done) len = ops->ndo_fcoe_ddp_done(real_dev, xid); return len; } static int vlan_dev_fcoe_enable(struct net_device *dev) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; if (ops->ndo_fcoe_enable) rc = ops->ndo_fcoe_enable(real_dev); return rc; } static int vlan_dev_fcoe_disable(struct net_device *dev) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; if (ops->ndo_fcoe_disable) rc = ops->ndo_fcoe_disable(real_dev); return rc; } static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, struct scatterlist *sgl, unsigned int sgc) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = 0; if (ops->ndo_fcoe_ddp_target) rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc); return rc; } #endif #ifdef NETDEV_FCOE_WWNN static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; if (ops->ndo_fcoe_get_wwn) rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); return rc; } #endif static void vlan_dev_change_rx_flags(struct net_device *dev, int change) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; if (change & IFF_ALLMULTI) dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); if (change & IFF_PROMISC) dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 
1 : -1); } static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) { dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); } static __be16 vlan_parse_protocol(const struct sk_buff *skb) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); return __vlan_get_protocol(skb, veth->h_vlan_proto, NULL); } static const struct header_ops vlan_header_ops = { .create = vlan_dev_hard_header, .parse = eth_header_parse, .parse_protocol = vlan_parse_protocol, }; static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; if (saddr == NULL) saddr = dev->dev_addr; return dev_hard_header(skb, real_dev, type, daddr, saddr, len); } static const struct header_ops vlan_passthru_header_ops = { .create = vlan_passthru_hard_header, .parse = eth_header_parse, .parse_protocol = vlan_parse_protocol, }; static const struct device_type vlan_type = { .name = "vlan", }; static const struct net_device_ops vlan_netdev_ops; static int vlan_dev_init(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; netif_carrier_off(dev); /* IFF_BROADCAST|IFF_MULTICAST; ??? */ dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_MASTER | IFF_SLAVE); dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))) | (1<<__LINK_STATE_PRESENT); if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING) dev->state |= (1 << __LINK_STATE_NOCARRIER); dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC | NETIF_F_FCOE_CRC | NETIF_F_FSO; if (real_dev->vlan_features & NETIF_F_HW_MACSEC) dev->hw_features |= NETIF_F_HW_MACSEC; dev->features |= dev->hw_features; dev->lltx = true; dev->fcoe_mtu = true; netif_inherit_tso_max(dev, real_dev); if (dev->features & NETIF_F_VLAN_FEATURES) netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); dev->vlan_features = real_dev->vlan_features & ~(NETIF_F_FCOE_CRC | NETIF_F_FSO); dev->hw_enc_features = vlan_tnl_features(real_dev); dev->mpls_features = real_dev->mpls_features; /* ipv6 shared card related stuff */ dev->dev_id = real_dev->dev_id; if (is_zero_ether_addr(dev->dev_addr)) { eth_hw_addr_set(dev, real_dev->dev_addr); dev->addr_assign_type = NET_ADDR_STOLEN; } if (is_zero_ether_addr(dev->broadcast)) memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); #if IS_ENABLED(CONFIG_FCOE) dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid; #endif dev->needed_headroom = real_dev->needed_headroom; if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) { dev->header_ops = &vlan_passthru_header_ops; dev->hard_header_len = real_dev->hard_header_len; } else { dev->header_ops = &vlan_header_ops; dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; } dev->netdev_ops = &vlan_netdev_ops; SET_NETDEV_DEVTYPE(dev, &vlan_type); netdev_lockdep_set_classes(dev); vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan->vlan_pcpu_stats) return -ENOMEM; /* Get vlan's reference to real_dev */ netdev_hold(real_dev, &vlan->dev_tracker, GFP_KERNEL); return 0; } /* Note: this function might be called multiple times for the same device. 
*/ void vlan_dev_free_egress_priority(const struct net_device *dev) { struct vlan_priority_tci_mapping *pm; struct vlan_dev_priv *vlan = vlan_dev_priv(dev); int i; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { while ((pm = vlan->egress_priority_map[i]) != NULL) { vlan->egress_priority_map[i] = pm->next; kfree(pm); } } } static void vlan_dev_uninit(struct net_device *dev) { vlan_dev_free_egress_priority(dev); } static netdev_features_t vlan_dev_fix_features(struct net_device *dev, netdev_features_t features) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; netdev_features_t old_features = features; netdev_features_t lower_features; lower_features = netdev_intersect_features((real_dev->vlan_features | NETIF_F_RXCSUM), real_dev->features); /* Add HW_CSUM setting to preserve user ability to control * checksum offload on the vlan device. */ if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) lower_features |= NETIF_F_HW_CSUM; features = netdev_intersect_features(features, lower_features); features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE); return features; } static int vlan_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); return __ethtool_get_link_ksettings(vlan->real_dev, cmd); } static void vlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, vlan_fullname, sizeof(info->driver)); strscpy(info->version, vlan_version, sizeof(info->version)); strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); } static int vlan_ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); return ethtool_get_ts_info_by_layer(vlan->real_dev, info); } static void vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct vlan_pcpu_stats *p; u32 rx_errors = 0, tx_dropped = 0; int i; for_each_possible_cpu(i) { u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; unsigned int start; p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); do { start = u64_stats_fetch_begin(&p->syncp); rxpackets = u64_stats_read(&p->rx_packets); rxbytes = u64_stats_read(&p->rx_bytes); rxmulticast = u64_stats_read(&p->rx_multicast); txpackets = u64_stats_read(&p->tx_packets); txbytes = u64_stats_read(&p->tx_bytes); } while (u64_stats_fetch_retry(&p->syncp, start)); stats->rx_packets += rxpackets; stats->rx_bytes += rxbytes; stats->multicast += rxmulticast; stats->tx_packets += txpackets; stats->tx_bytes += txbytes; /* rx_errors & tx_dropped are u32 */ rx_errors += READ_ONCE(p->rx_errors); tx_dropped += READ_ONCE(p->tx_dropped); } stats->rx_errors = rx_errors; stats->tx_dropped = tx_dropped; } #ifdef CONFIG_NET_POLL_CONTROLLER static void vlan_dev_poll_controller(struct net_device *dev) { return; } static int vlan_dev_netpoll_setup(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; struct netpoll *netpoll; int err = 0; netpoll = kzalloc_obj(*netpoll); err = -ENOMEM; if (!netpoll) goto out; err = __netpoll_setup(netpoll, real_dev); if (err) { kfree(netpoll); goto out; } vlan->netpoll = netpoll; out: return err; } static void vlan_dev_netpoll_cleanup(struct net_device *dev) { struct vlan_dev_priv *vlan= vlan_dev_priv(dev); struct netpoll *netpoll = vlan->netpoll; if (!netpoll) return; vlan->netpoll = NULL; __netpoll_free(netpoll); } #endif /* 
CONFIG_NET_POLL_CONTROLLER */ static int vlan_dev_get_iflink(const struct net_device *dev) { const struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; return READ_ONCE(real_dev->ifindex); } static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx, struct net_device_path *path) { struct vlan_dev_priv *vlan = vlan_dev_priv(ctx->dev); path->type = DEV_PATH_VLAN; path->encap.id = vlan->vlan_id; path->encap.proto = vlan->vlan_proto; path->dev = ctx->dev; ctx->dev = vlan->real_dev; if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan)) return -ENOSPC; ctx->vlan[ctx->num_vlans].id = vlan->vlan_id; ctx->vlan[ctx->num_vlans].proto = vlan->vlan_proto; ctx->num_vlans++; return 0; } #if IS_ENABLED(CONFIG_MACSEC) static const struct macsec_ops *vlan_get_macsec_ops(const struct macsec_context *ctx) { return vlan_dev_priv(ctx->netdev)->real_dev->macsec_ops; } static int vlan_macsec_offload(int (* const func)(struct macsec_context *), struct macsec_context *ctx) { if (unlikely(!func)) return 0; return (*func)(ctx); } static int vlan_macsec_dev_open(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_dev_open, ctx); } static int vlan_macsec_dev_stop(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_dev_stop, ctx); } static int vlan_macsec_add_secy(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_add_secy, ctx); } static int vlan_macsec_upd_secy(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_upd_secy, ctx); } static int vlan_macsec_del_secy(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_del_secy, ctx); } static int vlan_macsec_add_rxsc(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_add_rxsc, ctx); } static int vlan_macsec_upd_rxsc(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_upd_rxsc, ctx); } static int vlan_macsec_del_rxsc(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_del_rxsc, ctx); } static int vlan_macsec_add_rxsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_add_rxsa, ctx); } static int vlan_macsec_upd_rxsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_upd_rxsa, ctx); } static int vlan_macsec_del_rxsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_del_rxsa, ctx); } static int vlan_macsec_add_txsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_add_txsa, ctx); } static int vlan_macsec_upd_txsa(struct macsec_context *ctx) { const struct macsec_ops *ops 
= vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_upd_txsa, ctx); } static int vlan_macsec_del_txsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_del_txsa, ctx); } static int vlan_macsec_get_dev_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_dev_stats, ctx); } static int vlan_macsec_get_tx_sc_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_tx_sc_stats, ctx); } static int vlan_macsec_get_tx_sa_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_tx_sa_stats, ctx); } static int vlan_macsec_get_rx_sc_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_rx_sc_stats, ctx); } static int vlan_macsec_get_rx_sa_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_rx_sa_stats, ctx); } static const struct macsec_ops macsec_offload_ops = { /* Device wide */ .mdo_dev_open = vlan_macsec_dev_open, .mdo_dev_stop = vlan_macsec_dev_stop, /* SecY */ .mdo_add_secy = vlan_macsec_add_secy, .mdo_upd_secy = vlan_macsec_upd_secy, .mdo_del_secy = vlan_macsec_del_secy, /* Security channels */ .mdo_add_rxsc = vlan_macsec_add_rxsc, .mdo_upd_rxsc = vlan_macsec_upd_rxsc, .mdo_del_rxsc = vlan_macsec_del_rxsc, /* Security associations */ .mdo_add_rxsa = vlan_macsec_add_rxsa, .mdo_upd_rxsa = vlan_macsec_upd_rxsa, .mdo_del_rxsa = vlan_macsec_del_rxsa, .mdo_add_txsa = vlan_macsec_add_txsa, .mdo_upd_txsa = vlan_macsec_upd_txsa, .mdo_del_txsa = vlan_macsec_del_txsa, /* Statistics */ .mdo_get_dev_stats = vlan_macsec_get_dev_stats, .mdo_get_tx_sc_stats = vlan_macsec_get_tx_sc_stats, .mdo_get_tx_sa_stats = vlan_macsec_get_tx_sa_stats, .mdo_get_rx_sc_stats = vlan_macsec_get_rx_sc_stats, .mdo_get_rx_sa_stats = vlan_macsec_get_rx_sa_stats, }; #endif static const struct ethtool_ops vlan_ethtool_ops = { .get_link_ksettings = vlan_ethtool_get_link_ksettings, .get_drvinfo = vlan_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = vlan_ethtool_get_ts_info, }; static const struct net_device_ops vlan_netdev_ops = { .ndo_change_mtu = vlan_dev_change_mtu, .ndo_init = vlan_dev_init, .ndo_uninit = vlan_dev_uninit, .ndo_open = vlan_dev_open, .ndo_stop = vlan_dev_stop, .ndo_start_xmit = vlan_dev_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = vlan_dev_set_mac_address, .ndo_set_rx_mode = vlan_dev_set_rx_mode, .ndo_change_rx_flags = vlan_dev_change_rx_flags, .ndo_eth_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, .ndo_get_stats64 = vlan_dev_get_stats64, #if IS_ENABLED(CONFIG_FCOE) .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, .ndo_fcoe_enable = vlan_dev_fcoe_enable, .ndo_fcoe_disable = vlan_dev_fcoe_disable, .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = vlan_dev_poll_controller, .ndo_netpoll_setup = 
vlan_dev_netpoll_setup, .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, #endif .ndo_fix_features = vlan_dev_fix_features, .ndo_get_iflink = vlan_dev_get_iflink, .ndo_fill_forward_path = vlan_dev_fill_forward_path, .ndo_hwtstamp_get = vlan_hwtstamp_get, .ndo_hwtstamp_set = vlan_hwtstamp_set, }; static void vlan_dev_free(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); free_percpu(vlan->vlan_pcpu_stats); vlan->vlan_pcpu_stats = NULL; /* Get rid of the vlan's reference to real_dev */ netdev_put(vlan->real_dev, &vlan->dev_tracker); } void vlan_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE; dev->priv_flags |= IFF_UNICAST_FLT; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); dev->netdev_ops = &vlan_netdev_ops; dev->needs_free_netdev = true; dev->priv_destructor = vlan_dev_free; dev->ethtool_ops = &vlan_ethtool_ops; #if IS_ENABLED(CONFIG_MACSEC) dev->macsec_ops = &macsec_offload_ops; #endif dev->min_mtu = 0; dev->max_mtu = ETH_MAX_MTU; eth_zero_addr(dev->broadcast); }
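The smp_wmb() in vlan_dev_set_egress_priority() above pairs with an smp_rmb() on the lookup side. The sketch below shows roughly what that reader (vlan_dev_get_egress_qos_mask(), normally defined in net/8021q/vlan.h) looks like, to illustrate why the mapping entry must be fully initialised before it is published; it is reproduced here only as an illustration.

/* Reader-side sketch of the lockless egress priority lookup. */
static inline u16 example_egress_qos_mask(struct net_device *dev, u32 skprio)
{
	struct vlan_priority_tci_mapping *mp;

	smp_rmb();	/* pairs with smp_wmb() in vlan_dev_set_egress_priority() */

	mp = vlan_dev_priv(dev)->egress_priority_map[skprio & 0xF];
	while (mp) {
		if (mp->priority == skprio)
			return mp->vlan_qos;	/* already shifted into TCI position */
		mp = mp->next;
	}
	return 0;
}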
/* mpiutil.ac - Utility functions for MPI * Copyright (C) 1998, 1999 Free Software Foundation, Inc. * * This file is part of GnuPG. * * GnuPG is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GnuPG is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include <linux/export.h> #include "mpi-internal.h" /**************** * Note: It was a bad idea to use the number of limbs to allocate * because on an alpha the limbs are large but we normally need * integers of n bits - So we should change this to bits (or bytes). * * But mpi_alloc is used in a lot of places :-) */ MPI mpi_alloc(unsigned nlimbs) { MPI a; a = kmalloc_obj(*a); if (!a) return a; if (nlimbs) { a->d = mpi_alloc_limb_space(nlimbs); if (!a->d) { kfree(a); return NULL; } } else { a->d = NULL; } a->alloced = nlimbs; a->nlimbs = 0; a->sign = 0; a->flags = 0; a->nbits = 0; return a; } EXPORT_SYMBOL_GPL(mpi_alloc); mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs) { size_t len = nlimbs * sizeof(mpi_limb_t); if (!len) return NULL; return kmalloc(len, GFP_KERNEL); } void mpi_free_limb_space(mpi_ptr_t a) { if (!a) return; kfree_sensitive(a); } void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) { mpi_free_limb_space(a->d); a->d = ap; a->alloced = nlimbs; } /**************** * Resize the array of A to NLIMBS. The additional space is cleared * (set to 0) [done by m_realloc()] */ int mpi_resize(MPI a, unsigned nlimbs) { void *p; if (nlimbs <= a->alloced) return 0; /* no need to do it */ if (a->d) { p = kzalloc_objs(mpi_limb_t, nlimbs); if (!p) return -ENOMEM; memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); kfree_sensitive(a->d); a->d = p; } else { a->d = kzalloc_objs(mpi_limb_t, nlimbs); if (!a->d) return -ENOMEM; } a->alloced = nlimbs; return 0; } void mpi_free(MPI a) { if (!a) return; if (a->flags & 4) kfree_sensitive(a->d); else mpi_free_limb_space(a->d); if (a->flags & ~7) pr_info("invalid flag value in mpi\n"); kfree(a); } EXPORT_SYMBOL_GPL(mpi_free); /**************** * Note: This copy function should not interpret the MPI * but copy it transparently. */ MPI mpi_copy(MPI a) { int i; MPI b; if (a) { b = mpi_alloc(a->nlimbs); if (!b) return NULL; b->nlimbs = a->nlimbs; b->sign = a->sign; b->flags = a->flags; b->flags &= ~(16|32); /* Reset the immutable and constant flags.
*/ for (i = 0; i < b->nlimbs; i++) b->d[i] = a->d[i]; } else b = NULL; return b; } MODULE_DESCRIPTION("Multiprecision maths library"); MODULE_LICENSE("GPL");
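A short usage sketch of the allocation helpers above: create an MPI with a few limbs of storage, grow it with mpi_resize() (which zeroes the new limbs), and release it with mpi_free(). The limb value written here is purely illustrative.

/* Illustrative round trip through mpi_alloc()/mpi_resize()/mpi_free(). */
static int example_mpi_roundtrip(void)
{
	MPI m;
	int err = 0;

	m = mpi_alloc(4);		/* 4 limbs of storage, value still unset */
	if (!m)
		return -ENOMEM;

	err = mpi_resize(m, 8);		/* grow to 8 limbs; added limbs are zeroed */
	if (err)
		goto out;

	m->d[0] = 42;			/* least-significant limb */
	m->nlimbs = 1;			/* limbs actually in use */
out:
	mpi_free(m);
	return err;
}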
// SPDX-License-Identifier: GPL-2.0-or-later /* net/sched/sch_ingress.c - Ingress and clsact qdisc * * Authors: Jamal Hadi Salim 1999 */ #include <linux/module.h> #include <linux/types.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/tcx.h> struct ingress_sched_data { struct tcf_block *block; struct tcf_block_ext_info block_info; struct mini_Qdisc_pair miniqp; }; static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long ingress_find(struct Qdisc *sch, u32 classid) { return TC_H_MIN(classid) + 1; } static unsigned long ingress_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { return ingress_find(sch, classid); } static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl) { } static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) { } static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { struct ingress_sched_data *q = qdisc_priv(sch); return q->block; } static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv) { struct mini_Qdisc_pair *miniqp = priv; mini_qdisc_pair_swap(miniqp, tp_head); }; static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index) { struct ingress_sched_data *q = qdisc_priv(sch); q->block_info.block_index = block_index; } static u32 ingress_ingress_block_get(struct Qdisc *sch) { struct ingress_sched_data *q = qdisc_priv(sch); return q->block_info.block_index; } static int ingress_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct ingress_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *entry; bool created; int err; if (sch->parent != TC_H_INGRESS) return -EOPNOTSUPP; net_inc_ingress_queue(); entry = tcx_entry_fetch_or_create(dev, true, &created); if (!entry) return
-ENOMEM; tcx_miniq_inc(entry); mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, true); q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q->block_info.chain_head_change = clsact_chain_head_change; q->block_info.chain_head_change_priv = &q->miniqp; err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack); if (err) return err; mini_qdisc_pair_block_init(&q->miniqp, q->block); return 0; } static void ingress_destroy(struct Qdisc *sch) { struct ingress_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *entry; if (sch->parent != TC_H_INGRESS) return; tcf_block_put_ext(q->block, sch, &q->block_info); if (mini_qdisc_pair_inited(&q->miniqp)) { entry = rtnl_dereference(dev->tcx_ingress); tcx_miniq_dec(entry); if (!tcx_entry_is_active(entry)) { tcx_entry_update(dev, NULL, true); tcx_entry_free(entry); } } net_dec_ingress_queue(); } static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) { struct nlattr *nest; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static const struct Qdisc_class_ops ingress_class_ops = { .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED, .leaf = ingress_leaf, .find = ingress_find, .walk = ingress_walk, .tcf_block = ingress_tcf_block, .bind_tcf = ingress_bind_filter, .unbind_tcf = ingress_unbind_filter, }; static struct Qdisc_ops ingress_qdisc_ops __read_mostly = { .cl_ops = &ingress_class_ops, .id = "ingress", .priv_size = sizeof(struct ingress_sched_data), .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = ingress_init, .destroy = ingress_destroy, .dump = ingress_dump, .ingress_block_set = ingress_ingress_block_set, .ingress_block_get = ingress_ingress_block_get, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("ingress"); struct clsact_sched_data { struct tcf_block *ingress_block; struct tcf_block *egress_block; struct tcf_block_ext_info ingress_block_info; struct tcf_block_ext_info egress_block_info; struct mini_Qdisc_pair miniqp_ingress; struct mini_Qdisc_pair miniqp_egress; }; static unsigned long clsact_find(struct Qdisc *sch, u32 classid) { switch (TC_H_MIN(classid)) { case TC_H_MIN(TC_H_MIN_INGRESS): case TC_H_MIN(TC_H_MIN_EGRESS): return TC_H_MIN(classid); default: return 0; } } static unsigned long clsact_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { return clsact_find(sch, classid); } static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { struct clsact_sched_data *q = qdisc_priv(sch); switch (cl) { case TC_H_MIN(TC_H_MIN_INGRESS): return q->ingress_block; case TC_H_MIN(TC_H_MIN_EGRESS): return q->egress_block; default: return NULL; } } static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index) { struct clsact_sched_data *q = qdisc_priv(sch); q->ingress_block_info.block_index = block_index; } static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index) { struct clsact_sched_data *q = qdisc_priv(sch); q->egress_block_info.block_index = block_index; } static u32 clsact_ingress_block_get(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); return q->ingress_block_info.block_index; } static u32 clsact_egress_block_get(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); return q->egress_block_info.block_index; } static int clsact_init(struct Qdisc *sch, struct nlattr *opt, struct 
netlink_ext_ack *extack) { struct clsact_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *entry; bool created; int err; if (sch->parent != TC_H_CLSACT) return -EOPNOTSUPP; net_inc_ingress_queue(); net_inc_egress_queue(); entry = tcx_entry_fetch_or_create(dev, true, &created); if (!entry) return -ENOMEM; tcx_miniq_inc(entry); mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, true); q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q->ingress_block_info.chain_head_change = clsact_chain_head_change; q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress; err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info, extack); if (err) return err; mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block); entry = tcx_entry_fetch_or_create(dev, false, &created); if (!entry) return -ENOMEM; tcx_miniq_inc(entry); mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, false); q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS; q->egress_block_info.chain_head_change = clsact_chain_head_change; q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack); } static void clsact_destroy(struct Qdisc *sch) { struct bpf_mprog_entry *ingress_entry, *egress_entry; struct clsact_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); if (sch->parent != TC_H_CLSACT) return; tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); if (mini_qdisc_pair_inited(&q->miniqp_ingress)) { ingress_entry = rtnl_dereference(dev->tcx_ingress); tcx_miniq_dec(ingress_entry); if (!tcx_entry_is_active(ingress_entry)) { tcx_entry_update(dev, NULL, true); tcx_entry_free(ingress_entry); } } if (mini_qdisc_pair_inited(&q->miniqp_egress)) { egress_entry = rtnl_dereference(dev->tcx_egress); tcx_miniq_dec(egress_entry); if (!tcx_entry_is_active(egress_entry)) { tcx_entry_update(dev, NULL, false); tcx_entry_free(egress_entry); } } net_dec_ingress_queue(); net_dec_egress_queue(); } static const struct Qdisc_class_ops clsact_class_ops = { .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED, .leaf = ingress_leaf, .find = clsact_find, .walk = ingress_walk, .tcf_block = clsact_tcf_block, .bind_tcf = clsact_bind_filter, .unbind_tcf = ingress_unbind_filter, }; static struct Qdisc_ops clsact_qdisc_ops __read_mostly = { .cl_ops = &clsact_class_ops, .id = "clsact", .priv_size = sizeof(struct clsact_sched_data), .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = clsact_init, .destroy = clsact_destroy, .dump = ingress_dump, .ingress_block_set = clsact_ingress_block_set, .egress_block_set = clsact_egress_block_set, .ingress_block_get = clsact_ingress_block_get, .egress_block_get = clsact_egress_block_get, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("clsact"); static int __init ingress_module_init(void) { int ret; ret = register_qdisc(&ingress_qdisc_ops); if (!ret) { ret = register_qdisc(&clsact_qdisc_ops); if (ret) unregister_qdisc(&ingress_qdisc_ops); } return ret; } static void __exit ingress_module_exit(void) { unregister_qdisc(&ingress_qdisc_ops); unregister_qdisc(&clsact_qdisc_ops); } module_init(ingress_module_init); module_exit(ingress_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Ingress and clsact based ingress and egress 
qdiscs");
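For reference, clsact_find() above only recognises the two fixed minor class IDs. The sketch below shows how a filter-attach path might construct those canonical class handles; it is illustrative only and not part of the file.

#include <linux/pkt_sched.h>

/* Build the class handle addressing the ingress or egress side of clsact. */
static u32 example_clsact_classid(bool egress)
{
	u32 minor = egress ? TC_H_MIN_EGRESS : TC_H_MIN_INGRESS;

	return TC_H_MAKE(TC_H_CLSACT, minor);	/* fixed major, minor picks the direction */
}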
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GFP_H #define __LINUX_GFP_H #include <linux/gfp_types.h> #include <linux/mmzone.h> #include <linux/topology.h> #include <linux/alloc_tag.h> #include <linux/cleanup.h> #include <linux/sched.h> struct vm_area_struct; struct mempolicy; /* Helper macro to avoid gfp flags if they are the default one */ #define __default_gfp(a,b,...) b #define default_gfp(...) __default_gfp(,##__VA_ARGS__,GFP_KERNEL) /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) #define GFP_MOVABLE_SHIFT 3 static inline int gfp_migratetype(const gfp_t gfp_flags) { VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE); BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE); BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >> GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC); if (unlikely(page_group_by_mobility_disabled)) return MIGRATE_UNMOVABLE; /* Group based on mobility */ return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; } #undef GFP_MOVABLE_MASK #undef GFP_MOVABLE_SHIFT static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) { return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags) { /* * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed. * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd. * All GFP_* flags including GFP_NOWAIT use one or both flags.
* alloc_pages_nolock() is the only API that doesn't specify either flag. * * This is stronger than GFP_NOWAIT or GFP_ATOMIC because * those are guaranteed to never block on a sleeping lock. * Here we are enforcing that the allocation doesn't ever spin * on any locks (i.e. only trylocks). There is no high level * GFP_$FOO flag for this use in alloc_pages_nolock() as the * regular page allocator doesn't fully support this * allocation mode. */ return !!(gfp_flags & __GFP_RECLAIM); } #ifdef CONFIG_HIGHMEM #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM #else #define OPT_ZONE_HIGHMEM ZONE_NORMAL #endif #ifdef CONFIG_ZONE_DMA #define OPT_ZONE_DMA ZONE_DMA #else #define OPT_ZONE_DMA ZONE_NORMAL #endif #ifdef CONFIG_ZONE_DMA32 #define OPT_ZONE_DMA32 ZONE_DMA32 #else #define OPT_ZONE_DMA32 ZONE_NORMAL #endif /* * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT * bits long and there are 16 of them to cover all possible combinations of * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. * * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. * But GFP_MOVABLE is not only a zone specifier but also an allocation * policy. Therefore __GFP_MOVABLE plus another zone selector is valid. * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". * * bit result * ================= * 0x0 => NORMAL * 0x1 => DMA or NORMAL * 0x2 => HIGHMEM or NORMAL * 0x3 => BAD (DMA+HIGHMEM) * 0x4 => DMA32 or NORMAL * 0x5 => BAD (DMA+DMA32) * 0x6 => BAD (HIGHMEM+DMA32) * 0x7 => BAD (HIGHMEM+DMA32+DMA) * 0x8 => NORMAL (MOVABLE+0) * 0x9 => DMA or NORMAL (MOVABLE+DMA) * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) * 0xb => BAD (MOVABLE+HIGHMEM+DMA) * 0xc => DMA32 or NORMAL (MOVABLE+DMA32) * 0xd => BAD (MOVABLE+DMA32+DMA) * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) * * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms. */ #if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4 /* ZONE_DEVICE is not a valid GFP zone specifier */ #define GFP_ZONES_SHIFT 2 #else #define GFP_ZONES_SHIFT ZONES_SHIFT #endif #if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG #error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer #endif #define GFP_ZONE_TABLE ( \ (ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \ | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \ | (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \ | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \ | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\ | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\ ) /* * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per * entry starting with bit 0. Bit is set if the combination is not * allowed. 
*/ #define GFP_ZONE_BAD ( \ 1 << (___GFP_DMA | ___GFP_HIGHMEM) \ | 1 << (___GFP_DMA | ___GFP_DMA32) \ | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \ ) static inline enum zone_type gfp_zone(gfp_t flags) { enum zone_type z; int bit = (__force int) (flags & GFP_ZONEMASK); z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) & ((1 << GFP_ZONES_SHIFT) - 1); VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1); return z; } /* * There is only one page-allocator function, and two main namespaces to * it. The alloc_page*() variants return 'struct page *' and as such * can allocate highmem pages, the *get*page*() variants return * virtual kernel addresses to the allocated page(s). */ static inline int gfp_zonelist(gfp_t flags) { #ifdef CONFIG_NUMA if (unlikely(flags & __GFP_THISNODE)) return ZONELIST_NOFALLBACK; #endif return ZONELIST_FALLBACK; } /* * gfp flag masking for nested internal allocations. * * For code that needs to do allocations inside the public allocation API (e.g. * memory allocation tracking code) the allocations need to obey the caller * allocation context constrains to prevent allocation context mismatches (e.g. * GFP_KERNEL allocations in GFP_NOFS contexts) from potential deadlock * situations. * * It is also assumed that these nested allocations are for internal kernel * object storage purposes only and are not going to be used for DMA, etc. Hence * we strip out all the zone information and leave just the context information * intact. * * Further, internal allocations must fail before the higher level allocation * can fail, so we must make them fail faster and fail silently. We also don't * want them to deplete emergency reserves. Hence nested allocations must be * prepared for these allocations to fail. */ static inline gfp_t gfp_nested_mask(gfp_t flags) { return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) | (__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)); } /* * We get the zone list from the current node and the gfp_mask. * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones. * There are two zonelists per node, one for all zones with memory and * one containing just zones from the node the zonelist belongs to. * * For the case of non-NUMA systems the NODE_DATA() gets optimized to * &contig_page_data at compile-time. */ static inline struct zonelist *node_zonelist(int nid, gfp_t flags) { return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags); } #ifndef HAVE_ARCH_FREE_PAGE static inline void arch_free_page(struct page *page, int order) { } #endif #ifndef HAVE_ARCH_ALLOC_PAGE static inline void arch_alloc_page(struct page *page, int order) { } #endif struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask); #define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__)) struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask); #define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__)) unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, struct page **page_array); #define __alloc_pages_bulk(...) 
alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__)) unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp, unsigned long nr_pages, struct page **page_array); #define alloc_pages_bulk_mempolicy(...) \ alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__)) /* Bulk allocate order-0 pages */ #define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \ __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array) static inline unsigned long alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array); } #define alloc_pages_bulk_node(...) \ alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__)) static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) { gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN); if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN)) return; if (node_online(this_node)) return; pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node); dump_stack(); } /* * Allocate pages, preferring the node given as nid. The node must be valid and * online. For more general interface, see alloc_pages_node(). */ static inline struct page * __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); warn_if_node_offline(nid, gfp_mask); return __alloc_pages_noprof(gfp_mask, order, nid, NULL); } #define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__)) static inline struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); warn_if_node_offline(nid, gfp); return __folio_alloc_noprof(gfp, order, nid, NULL); } #define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__)) /* * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE, * prefer the current CPU's closest node. Otherwise node must be valid and * online. */ static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); return __alloc_pages_node_noprof(nid, gfp_mask, order); } #define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__)) #ifdef CONFIG_NUMA struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order); struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order); struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid); struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr); #else static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) { return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order); } static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) { return __folio_alloc_node_noprof(gfp, order, numa_node_id()); } static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid) { return folio_alloc_noprof(gfp, order); } static inline struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr) { return folio_alloc_noprof(gfp, order); } #endif #define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__)) #define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__)) #define folio_alloc_mpol(...) 
alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__)) #define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__)) #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) static inline struct page *alloc_page_vma_noprof(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) { struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr); return &folio->page; } #define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__)) struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order); #define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__)) extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order); #define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__)) extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask); #define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__)) void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1); #define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__)) void free_pages_exact(void *virt, size_t size); __meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2); #define alloc_pages_exact_nid(...) \ alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__)) #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask), 0) #define __get_dma_pages(gfp_mask, order) \ __get_free_pages((gfp_mask) | GFP_DMA, (order)) extern void __free_pages(struct page *page, unsigned int order); extern void free_pages_nolock(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) void page_alloc_init_cpuhp(void); bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); void drain_local_pages(struct zone *zone); void page_alloc_init_late(void); void setup_pcp_cacheinfo(unsigned int cpu); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what * GFP flags are used before interrupts are enabled. Once interrupts are * enabled, it is set to __GFP_BITS_MASK while the system is running. During * hibernation, it is used by PM to avoid I/O during memory allocation while * devices are suspended. */ extern gfp_t gfp_allowed_mask; /* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); /* A helper for checking if gfp includes all the specified flags */ static inline bool gfp_has_flags(gfp_t gfp, gfp_t flags) { return (gfp & flags) == flags; } static inline bool gfp_has_io_fs(gfp_t gfp) { return gfp_has_flags(gfp, __GFP_IO | __GFP_FS); } /* * Check if the gfp flags allow compaction - GFP_NOIO is a really * tricky context because the migration might require IO. */ static inline bool gfp_compaction_allowed(gfp_t gfp_mask) { return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO); } extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); #ifdef CONFIG_CONTIG_ALLOC typedef unsigned int __bitwise acr_flags_t; #define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request #define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA /* The below functions must be run on a range from a single zone. 
*/ int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end, acr_flags_t alloc_flags, gfp_t gfp_mask); #define alloc_contig_frozen_range(...) \ alloc_hooks(alloc_contig_frozen_range_noprof(__VA_ARGS__)) int alloc_contig_range_noprof(unsigned long start, unsigned long end, acr_flags_t alloc_flags, gfp_t gfp_mask); #define alloc_contig_range(...) \ alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__)) struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask); #define alloc_contig_frozen_pages(...) \ alloc_hooks(alloc_contig_frozen_pages_noprof(__VA_ARGS__)) struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask); #define alloc_contig_pages(...) \ alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__)) void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages); void free_contig_range(unsigned long pfn, unsigned long nr_pages); #endif DEFINE_FREE(free_page, void *, free_page((unsigned long)_T)) #endif /* __LINUX_GFP_H */
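The gfp_zone() helper earlier in this header resolves a zone from the low GFP bits with one shift-and-mask over the packed GFP_ZONE_TABLE constant, while GFP_ZONE_BAD traps the flag combinations that have no valid zone. Below is a minimal userspace sketch of that packed lookup-table technique; the flag bits, zone values, table and names here are simplified stand-ins, not the real ___GFP_* / GFP_ZONES_SHIFT definitions.

#include <stdio.h>

enum zone_type { ZONE_NORMAL = 0, ZONE_DMA = 1, ZONE_HIGHMEM = 2 };

#define FLAG_DMA	0x1u	/* stand-in for ___GFP_DMA */
#define FLAG_HIGHMEM	0x2u	/* stand-in for ___GFP_HIGHMEM */
#define ZONEMASK	0x3u	/* low bits used as the table index */
#define ZONES_SHIFT	2	/* bits per packed table entry */

/* the entry at index <flag combination> holds the zone to allocate from */
#define ZONE_TABLE ( \
	((unsigned long)ZONE_NORMAL  << (0            * ZONES_SHIFT)) | \
	((unsigned long)ZONE_DMA     << (FLAG_DMA     * ZONES_SHIFT)) | \
	((unsigned long)ZONE_HIGHMEM << (FLAG_HIGHMEM * ZONES_SHIFT)))

/* same shape as gfp_zone(): index the packed table, mask out one entry */
static enum zone_type pick_zone(unsigned int flags)
{
	unsigned int bit = flags & ZONEMASK;

	return (ZONE_TABLE >> (bit * ZONES_SHIFT)) & ((1u << ZONES_SHIFT) - 1);
}

int main(void)
{
	printf("no zone flags -> zone %d\n", pick_zone(0));
	printf("DMA           -> zone %d\n", pick_zone(FLAG_DMA));
	printf("HIGHMEM       -> zone %d\n", pick_zone(FLAG_HIGHMEM));
	return 0;
}

Passing FLAG_DMA | FLAG_HIGHMEM would index an entry the table never set; that is the class of nonsensical combination the kernel's GFP_ZONE_BAD mask exists to catch via VM_BUG_ON().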
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */ #ifndef _LINUX_SKMSG_H #define _LINUX_SKMSG_H #include <linux/bpf.h> #include <linux/filter.h> #include <linux/scatterlist.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp.h> #include <net/strparser.h> #define MAX_MSG_FRAGS MAX_SKB_FRAGS #define NR_MSG_FRAG_IDS (MAX_MSG_FRAGS + 1) enum __sk_action { __SK_DROP = 0, __SK_PASS, __SK_REDIRECT, __SK_NONE, }; struct sk_msg_sg { u32 start; u32 curr; u32 end; u32 size; u32 copybreak; DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2); /* The extra two elements: * 1) used for chaining the front and sections when the list becomes * partitioned (e.g. end < start). The crypto APIs require the * chaining; * 2) to chain tailer SG entries after the message. */ struct scatterlist data[MAX_MSG_FRAGS + 2]; }; /* UAPI in filter.c depends on struct sk_msg_sg being first element.
*/ struct sk_msg { struct sk_msg_sg sg; void *data; void *data_end; u32 apply_bytes; u32 cork_bytes; u32 flags; struct sk_buff *skb; struct sock *sk_redir; struct sock *sk; struct list_head list; }; struct sk_psock_progs { struct bpf_prog *msg_parser; struct bpf_prog *stream_parser; struct bpf_prog *stream_verdict; struct bpf_prog *skb_verdict; struct bpf_link *msg_parser_link; struct bpf_link *stream_parser_link; struct bpf_link *stream_verdict_link; struct bpf_link *skb_verdict_link; }; enum sk_psock_state_bits { SK_PSOCK_TX_ENABLED, SK_PSOCK_RX_STRP_ENABLED, }; struct sk_psock_link { struct list_head list; struct bpf_map *map; void *link_raw; }; struct sk_psock_work_state { u32 len; u32 off; }; struct sk_psock { struct sock *sk; struct sock *sk_redir; u32 apply_bytes; u32 cork_bytes; u32 eval; bool redir_ingress; /* undefined if sk_redir is null */ struct sk_msg *cork; struct sk_psock_progs progs; #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) struct strparser strp; u32 copied_seq; u32 ingress_bytes; #endif struct sk_buff_head ingress_skb; struct list_head ingress_msg; spinlock_t ingress_lock; /** @msg_tot_len: Total bytes queued in ingress_msg list. */ u32 msg_tot_len; unsigned long state; struct list_head link; spinlock_t link_lock; refcount_t refcnt; void (*saved_unhash)(struct sock *sk); void (*saved_destroy)(struct sock *sk); void (*saved_close)(struct sock *sk, long timeout); void (*saved_write_space)(struct sock *sk); void (*saved_data_ready)(struct sock *sk); /* psock_update_sk_prot may be called with restore=false many times * so the handler must be safe for this case. It will be called * exactly once with restore=true when the psock is being destroyed * and psock refcnt is zero, but before an RCU grace period. */ int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock, bool restore); struct proto *sk_proto; struct mutex work_mutex; struct sk_psock_work_state work_state; struct delayed_work work; struct sock *sk_pair; struct rcu_work rwork; }; int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, int elem_first_coalesce); int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src, u32 off, u32 len); void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len); int sk_msg_free(struct sock *sk, struct sk_msg *msg); int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg); void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes); void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg, u32 bytes); void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes); void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes); int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, struct sk_msg *msg, u32 bytes); int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, int len, int flags); int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, int len, int flags, int *copied_from_self); bool sk_msg_is_readable(struct sock *sk); static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes) { WARN_ON(i == msg->sg.end && bytes); } static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes) { if (psock->apply_bytes) { if (psock->apply_bytes < bytes) psock->apply_bytes = 0; else psock->apply_bytes -= bytes; } } static inline u32 sk_msg_iter_dist(u32 start, u32 end) { return end >= start ? 
end - start : end + (NR_MSG_FRAG_IDS - start); } #define sk_msg_iter_var_prev(var) \ do { \ if (var == 0) \ var = NR_MSG_FRAG_IDS - 1; \ else \ var--; \ } while (0) #define sk_msg_iter_var_next(var) \ do { \ var++; \ if (var == NR_MSG_FRAG_IDS) \ var = 0; \ } while (0) #define sk_msg_iter_prev(msg, which) \ sk_msg_iter_var_prev(msg->sg.which) #define sk_msg_iter_next(msg, which) \ sk_msg_iter_var_next(msg->sg.which) static inline void sk_msg_init(struct sk_msg *msg) { BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS); memset(msg, 0, sizeof(*msg)); sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS); } static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, int which, u32 size) { dst->sg.data[which] = src->sg.data[which]; dst->sg.data[which].length = size; dst->sg.size += size; src->sg.size -= size; src->sg.data[which].length -= size; src->sg.data[which].offset += size; } static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src) { memcpy(dst, src, sizeof(*src)); sk_msg_init(src); } static inline bool sk_msg_full(const struct sk_msg *msg) { return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS; } static inline u32 sk_msg_elem_used(const struct sk_msg *msg) { return sk_msg_iter_dist(msg->sg.start, msg->sg.end); } static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which) { return &msg->sg.data[which]; } static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which) { return msg->sg.data[which]; } static inline struct page *sk_msg_page(struct sk_msg *msg, int which) { return sg_page(sk_msg_elem(msg, which)); } static inline bool sk_msg_to_ingress(const struct sk_msg *msg) { return msg->flags & BPF_F_INGRESS; } static inline void sk_msg_compute_data_pointers(struct sk_msg *msg) { struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); if (test_bit(msg->sg.start, msg->sg.copy)) { msg->data = NULL; msg->data_end = NULL; } else { msg->data = sg_virt(sge); msg->data_end = msg->data + sge->length; } } static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page, u32 len, u32 offset) { struct scatterlist *sge; get_page(page); sge = sk_msg_elem(msg, msg->sg.end); sg_set_page(sge, page, len, offset); sg_unmark_end(sge); __set_bit(msg->sg.end, msg->sg.copy); msg->sg.size += len; sk_msg_iter_next(msg, end); } static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state) { do { if (copy_state) __set_bit(i, msg->sg.copy); else __clear_bit(i, msg->sg.copy); sk_msg_iter_var_next(i); if (i == msg->sg.end) break; } while (1); } static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start) { sk_msg_sg_copy(msg, start, true); } static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start) { sk_msg_sg_copy(msg, start, false); } static inline struct sk_psock *sk_psock(const struct sock *sk) { return __rcu_dereference_sk_user_data_with_flags(sk, SK_USER_DATA_PSOCK); } static inline void sk_psock_set_state(struct sk_psock *psock, enum sk_psock_state_bits bit) { set_bit(bit, &psock->state); } static inline void sk_psock_clear_state(struct sk_psock *psock, enum sk_psock_state_bits bit) { clear_bit(bit, &psock->state); } static inline bool sk_psock_test_state(const struct sk_psock *psock, enum sk_psock_state_bits bit) { return test_bit(bit, &psock->state); } static inline void sock_drop(struct sock *sk, struct sk_buff *skb) { sk_drops_skbadd(sk, skb); kfree_skb(skb); } static inline u32 sk_psock_get_msg_len_nolock(struct sk_psock *psock) { /* Used by ioctl to read msg_tot_len only; 
lock-free for performance */ return READ_ONCE(psock->msg_tot_len); } static inline void sk_psock_msg_len_add_locked(struct sk_psock *psock, int diff) { /* Use WRITE_ONCE to ensure correct read in sk_psock_get_msg_len_nolock(). * ingress_lock should be held to prevent concurrent updates to msg_tot_len */ WRITE_ONCE(psock->msg_tot_len, psock->msg_tot_len + diff); } static inline void sk_psock_msg_len_add(struct sk_psock *psock, int diff) { spin_lock_bh(&psock->ingress_lock); sk_psock_msg_len_add_locked(psock, diff); spin_unlock_bh(&psock->ingress_lock); } static inline bool sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { bool ret; spin_lock_bh(&psock->ingress_lock); if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { list_add_tail(&msg->list, &psock->ingress_msg); sk_psock_msg_len_add_locked(psock, msg->sg.size); ret = true; } else { sk_msg_free(psock->sk, msg); kfree(msg); ret = false; } spin_unlock_bh(&psock->ingress_lock); return ret; } static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock) { struct sk_msg *msg; spin_lock_bh(&psock->ingress_lock); msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); if (msg) { list_del(&msg->list); sk_psock_msg_len_add_locked(psock, -msg->sg.size); } spin_unlock_bh(&psock->ingress_lock); return msg; } static inline struct sk_msg *sk_psock_peek_msg_locked(struct sk_psock *psock) { return list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); } static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock) { struct sk_msg *msg; spin_lock_bh(&psock->ingress_lock); msg = sk_psock_peek_msg_locked(psock); spin_unlock_bh(&psock->ingress_lock); return msg; } static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock, struct sk_msg *msg) { struct sk_msg *ret; spin_lock_bh(&psock->ingress_lock); if (list_is_last(&msg->list, &psock->ingress_msg)) ret = NULL; else ret = list_next_entry(msg, list); spin_unlock_bh(&psock->ingress_lock); return ret; } static inline bool sk_psock_queue_empty(const struct sk_psock *psock) { return psock ? list_empty(&psock->ingress_msg) : true; } static inline void kfree_sk_msg(struct sk_msg *msg) { if (msg->skb) consume_skb(msg->skb); kfree(msg); } static inline void sk_psock_report_error(struct sk_psock *psock, int err) { struct sock *sk = psock->sk; sk->sk_err = err; sk_error_report(sk); } struct sk_psock *sk_psock_init(struct sock *sk, int node); void sk_psock_stop(struct sk_psock *psock); #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock); void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock); void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock); #else static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) { return -EOPNOTSUPP; } static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) { } static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) { } #endif void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock); void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock); int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg); /* * This specialized allocator has to be a macro for its allocations to be * accounted separately (to have a separate alloc_tag). The typecast is * intentional to enforce typesafety. 
*/ #define sk_psock_init_link() \ kzalloc_obj(struct sk_psock_link, GFP_ATOMIC | __GFP_NOWARN) static inline void sk_psock_free_link(struct sk_psock_link *link) { kfree(link); } struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock); static inline void sk_psock_cork_free(struct sk_psock *psock) { if (psock->cork) { sk_msg_free(psock->sk, psock->cork); kfree(psock->cork); psock->cork = NULL; } } static inline void sk_psock_restore_proto(struct sock *sk, struct sk_psock *psock) { if (psock->psock_update_sk_prot) psock->psock_update_sk_prot(sk, psock, true); } static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; rcu_read_lock(); psock = sk_psock(sk); if (psock && !refcount_inc_not_zero(&psock->refcnt)) psock = NULL; rcu_read_unlock(); return psock; } void sk_psock_drop(struct sock *sk, struct sk_psock *psock); static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) { if (refcount_dec_and_test(&psock->refcnt)) sk_psock_drop(sk, psock); } static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) { read_lock_bh(&sk->sk_callback_lock); if (psock->saved_data_ready) psock->saved_data_ready(sk); else sk->sk_data_ready(sk); read_unlock_bh(&sk->sk_callback_lock); } static inline void psock_set_prog(struct bpf_prog **pprog, struct bpf_prog *prog) { prog = xchg(pprog, prog); if (prog) bpf_prog_put(prog); } static inline int psock_replace_prog(struct bpf_prog **pprog, struct bpf_prog *prog, struct bpf_prog *old) { if (cmpxchg(pprog, old, prog) != old) return -ENOENT; if (old) bpf_prog_put(old); return 0; } static inline void psock_progs_drop(struct sk_psock_progs *progs) { psock_set_prog(&progs->msg_parser, NULL); psock_set_prog(&progs->stream_parser, NULL); psock_set_prog(&progs->stream_verdict, NULL); psock_set_prog(&progs->skb_verdict, NULL); } int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb); static inline bool sk_psock_strp_enabled(struct sk_psock *psock) { if (!psock) return false; return !!psock->saved_data_ready; } /* for tcp only, sk is locked */ static inline ssize_t sk_psock_msg_inq(struct sock *sk) { struct sk_psock *psock; ssize_t inq = 0; psock = sk_psock_get(sk); if (likely(psock)) { inq = sk_psock_get_msg_len_nolock(psock); sk_psock_put(sk, psock); } return inq; } /* for udp only, sk is not locked */ static inline ssize_t sk_msg_first_len(struct sock *sk) { struct sk_psock *psock; struct sk_msg *msg; ssize_t inq = 0; psock = sk_psock_get(sk); if (likely(psock)) { spin_lock_bh(&psock->ingress_lock); msg = sk_psock_peek_msg_locked(psock); if (msg) inq = msg->sg.size; spin_unlock_bh(&psock->ingress_lock); sk_psock_put(sk, psock); } return inq; } #if IS_ENABLED(CONFIG_NET_SOCK_MSG) #define BPF_F_STRPARSER (1UL << 1) /* We only have two bits so far. 
*/ #define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER) static inline bool skb_bpf_strparser(const struct sk_buff *skb) { unsigned long sk_redir = skb->_sk_redir; return sk_redir & BPF_F_STRPARSER; } static inline void skb_bpf_set_strparser(struct sk_buff *skb) { skb->_sk_redir |= BPF_F_STRPARSER; } static inline bool skb_bpf_ingress(const struct sk_buff *skb) { unsigned long sk_redir = skb->_sk_redir; return sk_redir & BPF_F_INGRESS; } static inline void skb_bpf_set_ingress(struct sk_buff *skb) { skb->_sk_redir |= BPF_F_INGRESS; } static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir, bool ingress) { skb->_sk_redir = (unsigned long)sk_redir; if (ingress) skb->_sk_redir |= BPF_F_INGRESS; } static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb) { unsigned long sk_redir = skb->_sk_redir; return (struct sock *)(sk_redir & BPF_F_PTR_MASK); } static inline void skb_bpf_redirect_clear(struct sk_buff *skb) { skb->_sk_redir = 0; } #endif /* CONFIG_NET_SOCK_MSG */ #endif /* _LINUX_SKMSG_H */
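skb_bpf_set_redir() above stores a struct sock pointer and the BPF_F_INGRESS / BPF_F_STRPARSER flags in the same unsigned long, relying on pointer alignment to keep the low two bits free; BPF_F_PTR_MASK strips the flags again on the fetch side. Here is a small userspace sketch of that pointer-tagging trick, with local stand-in names (fake_sock, set_redir) rather than the kernel definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define F_INGRESS   0x1ul		/* stand-in for BPF_F_INGRESS */
#define F_STRPARSER 0x2ul		/* stand-in for BPF_F_STRPARSER */
#define PTR_MASK    (~(F_INGRESS | F_STRPARSER))

struct fake_sock { int id; };

/* models skb_bpf_set_redir(): tag the pointer with the ingress flag */
static uintptr_t set_redir(struct fake_sock *sk, int ingress)
{
	uintptr_t v = (uintptr_t)sk;

	assert((v & ~PTR_MASK) == 0);	/* pointer must leave the low bits free */
	if (ingress)
		v |= F_INGRESS;
	return v;
}

int main(void)
{
	struct fake_sock *sk = malloc(sizeof(*sk));
	uintptr_t redir = set_redir(sk, 1);

	/* models skb_bpf_ingress() and skb_bpf_redirect_fetch() */
	printf("ingress flag set: %s\n", (redir & F_INGRESS) ? "yes" : "no");
	printf("pointer recovered: %s\n",
	       (struct fake_sock *)(redir & PTR_MASK) == sk ? "yes" : "no");
	free(sk);
	return 0;
}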
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/ #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <net/udp_tunnel.h> #include <net/sch_generic.h> #include <linux/netfilter.h> #include <rdma/ib_addr.h> #include "rxe.h" #include "rxe_net.h" #include "rxe_loc.h" static struct rxe_recv_sockets recv_sockets; #ifdef CONFIG_DEBUG_LOCK_ALLOC /* * lockdep can detect false positive circular dependencies * when there are user-space socket API users or in kernel * users switching between a tcp and rdma transport. * Maybe also switching between siw and rxe may cause * problems as per default sockets are only classified * by family and not by ip protocol. And there might * be different locks used between the application * and the low level sockets. * * Problems were seen with ksmbd.ko and cifs.ko, * switching transports, use git blame to find * more details. */ static struct lock_class_key rxe_recv_sk_key[2]; static struct lock_class_key rxe_recv_slock_key[2]; #endif /* CONFIG_DEBUG_LOCK_ALLOC */ static inline void rxe_reclassify_recv_socket(struct socket *sock) { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct sock *sk = sock->sk; if (WARN_ON_ONCE(!sock_allow_reclassification(sk))) return; switch (sk->sk_family) { case AF_INET: sock_lock_init_class_and_name(sk, "slock-AF_INET-RDMA-RXE-RECV", &rxe_recv_slock_key[0], "sk_lock-AF_INET-RDMA-RXE-RECV", &rxe_recv_sk_key[0]); break; case AF_INET6: sock_lock_init_class_and_name(sk, "slock-AF_INET6-RDMA-RXE-RECV", &rxe_recv_slock_key[1], "sk_lock-AF_INET6-RDMA-RXE-RECV", &rxe_recv_sk_key[1]); break; default: WARN_ON_ONCE(1); return; } /* * sock_lock_init_class_and_name() calls * sk_owner_set(sk, THIS_MODULE); in order * to make sure the referenced global * variables rxe_recv_slock_key and * rxe_recv_sk_key are not removed * before the socket is closed. * * However this prevents rxe_net_exit() * from being called and 'rmmod rdma_rxe' * is refused because of the references. * * For the global sockets in recv_sockets, * we are sure that rxe_net_exit() will call * rxe_release_udp_tunnel -> udp_tunnel_sock_release. * * So we don't need the additional reference to * our own (THIS_MODULE). */ sk_owner_put(sk); /* * We also call sk_owner_clear() otherwise * sk_owner_put(sk) in sk_prot_free will * fail, which is called via * sk_free -> __sk_free -> sk_destruct * and sk_destruct calls __sk_destruct * directly or via call_rcu() * so sk_prot_free() might be called * after rxe_net_exit(). 
*/ sk_owner_clear(sk); #endif /* CONFIG_DEBUG_LOCK_ALLOC */ } static struct dst_entry *rxe_find_route4(struct rxe_qp *qp, struct net_device *ndev, struct in_addr *saddr, struct in_addr *daddr) { struct rtable *rt; struct flowi4 fl = { { 0 } }; memset(&fl, 0, sizeof(fl)); fl.flowi4_oif = ndev->ifindex; memcpy(&fl.saddr, saddr, sizeof(*saddr)); memcpy(&fl.daddr, daddr, sizeof(*daddr)); fl.flowi4_proto = IPPROTO_UDP; rt = ip_route_output_key(&init_net, &fl); if (IS_ERR(rt)) { rxe_dbg_qp(qp, "no route to %pI4\n", &daddr->s_addr); return NULL; } return &rt->dst; } #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *rxe_find_route6(struct rxe_qp *qp, struct net_device *ndev, struct in6_addr *saddr, struct in6_addr *daddr) { struct dst_entry *ndst; struct flowi6 fl6 = { { 0 } }; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = ndev->ifindex; memcpy(&fl6.saddr, saddr, sizeof(*saddr)); memcpy(&fl6.daddr, daddr, sizeof(*daddr)); fl6.flowi6_proto = IPPROTO_UDP; ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), recv_sockets.sk6->sk, &fl6, NULL); if (IS_ERR(ndst)) { rxe_dbg_qp(qp, "no route to %pI6\n", daddr); return NULL; } if (unlikely(ndst->error)) { rxe_dbg_qp(qp, "no route to %pI6\n", daddr); goto put; } return ndst; put: dst_release(ndst); return NULL; } #else static struct dst_entry *rxe_find_route6(struct rxe_qp *qp, struct net_device *ndev, struct in6_addr *saddr, struct in6_addr *daddr) { return NULL; } #endif static struct dst_entry *rxe_find_route(struct net_device *ndev, struct rxe_qp *qp, struct rxe_av *av) { struct dst_entry *dst = NULL; if (qp_type(qp) == IB_QPT_RC) dst = sk_dst_get(qp->sk->sk); if (!dst || !dst_check(dst, qp->dst_cookie)) { if (dst) dst_release(dst); if (av->network_type == RXE_NETWORK_TYPE_IPV4) { struct in_addr *saddr; struct in_addr *daddr; saddr = &av->sgid_addr._sockaddr_in.sin_addr; daddr = &av->dgid_addr._sockaddr_in.sin_addr; dst = rxe_find_route4(qp, ndev, saddr, daddr); } else if (av->network_type == RXE_NETWORK_TYPE_IPV6) { struct in6_addr *saddr6; struct in6_addr *daddr6; saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr; daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr; dst = rxe_find_route6(qp, ndev, saddr6, daddr6); #if IS_ENABLED(CONFIG_IPV6) if (dst) qp->dst_cookie = rt6_get_cookie((struct rt6_info *)dst); #endif } if (dst && (qp_type(qp) == IB_QPT_RC)) { dst_hold(dst); sk_dst_set(qp->sk->sk, dst); } } return dst; } static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct udphdr *udph; struct rxe_dev *rxe; struct net_device *ndev = skb->dev; struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); /* takes a reference on rxe->ib_dev * drop when skb is freed */ rxe = rxe_get_dev_from_net(ndev); if (!rxe && is_vlan_dev(ndev)) rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev)); if (!rxe) goto drop; if (skb_linearize(skb)) { ib_device_put(&rxe->ib_dev); goto drop; } udph = udp_hdr(skb); pkt->rxe = rxe; pkt->port_num = 1; pkt->hdr = (u8 *)(udph + 1); pkt->mask = RXE_GRH_MASK; pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph); /* remove udp header */ skb_pull(skb, sizeof(struct udphdr)); rxe_rcv(skb); return 0; drop: kfree_skb(skb); return 0; } static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, bool ipv6) { int err; struct socket *sock; struct udp_port_cfg udp_cfg = { }; struct udp_tunnel_sock_cfg tnl_cfg = { }; if (ipv6) { udp_cfg.family = AF_INET6; udp_cfg.ipv6_v6only = 1; } else { udp_cfg.family = AF_INET; } udp_cfg.local_udp_port = port; /* Create UDP socket */ err = udp_sock_create(net, &udp_cfg, &sock); 
if (err < 0) return ERR_PTR(err); rxe_reclassify_recv_socket(sock); tnl_cfg.encap_type = 1; tnl_cfg.encap_rcv = rxe_udp_encap_recv; /* Setup UDP tunnel */ setup_udp_tunnel_sock(net, sock, &tnl_cfg); return sock; } static void rxe_release_udp_tunnel(struct socket *sk) { if (sk) udp_tunnel_sock_release(sk); } static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port, __be16 dst_port) { struct udphdr *udph; __skb_push(skb, sizeof(*udph)); skb_reset_transport_header(skb); udph = udp_hdr(skb); udph->dest = dst_port; udph->source = src_port; udph->len = htons(skb->len); udph->check = 0; } static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb, __be32 saddr, __be32 daddr, __u8 proto, __u8 tos, __u8 ttl, __be16 df, bool xnet) { struct iphdr *iph; skb_scrub_packet(skb, xnet); skb_clear_hash(skb); skb_dst_set(skb, dst_clone(dst)); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = IPVERSION; iph->ihl = sizeof(struct iphdr) >> 2; iph->tot_len = htons(skb->len); iph->frag_off = df; iph->protocol = proto; iph->tos = tos; iph->daddr = daddr; iph->saddr = saddr; iph->ttl = ttl; __ip_select_ident(dev_net(dst->dev), iph, skb_shinfo(skb)->gso_segs ?: 1); } static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb, struct in6_addr *saddr, struct in6_addr *daddr, __u8 proto, __u8 prio, __u8 ttl) { struct ipv6hdr *ip6h; memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_set(skb, dst_clone(dst)); __skb_push(skb, sizeof(*ip6h)); skb_reset_network_header(skb); ip6h = ipv6_hdr(skb); ip6_flow_hdr(ip6h, prio, htonl(0)); ip6h->payload_len = htons(skb->len); ip6h->nexthdr = proto; ip6h->hop_limit = ttl; ip6h->daddr = *daddr; ip6h->saddr = *saddr; ip6h->payload_len = htons(skb->len - sizeof(*ip6h)); } static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt, struct sk_buff *skb) { struct rxe_qp *qp = pkt->qp; struct dst_entry *dst; bool xnet = false; __be16 df = htons(IP_DF); struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr; struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr; dst = rxe_find_route(skb->dev, qp, av); if (!dst) { rxe_dbg_qp(qp, "Host not reachable\n"); return -EHOSTUNREACH; } prepare_udp_hdr(skb, cpu_to_be16(qp->src_port), cpu_to_be16(ROCE_V2_UDP_DPORT)); prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP, av->grh.traffic_class, av->grh.hop_limit, df, xnet); dst_release(dst); return 0; } static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt, struct sk_buff *skb) { struct rxe_qp *qp = pkt->qp; struct dst_entry *dst; struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr; struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr; dst = rxe_find_route(skb->dev, qp, av); if (!dst) { rxe_dbg_qp(qp, "Host not reachable\n"); return -EHOSTUNREACH; } prepare_udp_hdr(skb, cpu_to_be16(qp->src_port), cpu_to_be16(ROCE_V2_UDP_DPORT)); prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP, av->grh.traffic_class, av->grh.hop_limit); dst_release(dst); return 0; } int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt, struct sk_buff *skb) { int err = 0; if (skb->protocol == htons(ETH_P_IP)) err = prepare4(av, pkt, skb); else if (skb->protocol == htons(ETH_P_IPV6)) err = prepare6(av, pkt, skb); if (ether_addr_equal(skb->dev->dev_addr, av->dmac)) pkt->mask |= RXE_LOOPBACK_MASK; return err; } static void 
rxe_skb_tx_dtor(struct sk_buff *skb) { struct rxe_qp *qp = skb->sk->sk_user_data; int skb_out; skb_out = atomic_dec_return(&qp->skb_out); if (unlikely(qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) rxe_sched_task(&qp->send_task); rxe_put(qp); sock_put(skb->sk); } static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) { int err; struct sock *sk = pkt->qp->sk->sk; sock_hold(sk); skb->sk = sk; skb->destructor = rxe_skb_tx_dtor; rxe_get(pkt->qp); atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); else err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); return err; } /* fix up a send packet to match the packets * received from UDP before looping them back */ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt) { struct sock *sk = pkt->qp->sk->sk; memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt)); sock_hold(sk); skb->sk = sk; skb->destructor = rxe_skb_tx_dtor; rxe_get(pkt->qp); atomic_inc(&pkt->qp->skb_out); if (skb->protocol == htons(ETH_P_IP)) skb_pull(skb, sizeof(struct iphdr)); else skb_pull(skb, sizeof(struct ipv6hdr)); if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) { kfree_skb(skb); return -EIO; } /* remove udp header */ skb_pull(skb, sizeof(struct udphdr)); rxe_rcv(skb); return 0; } int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct sk_buff *skb) { int err; int is_request = pkt->mask & RXE_REQ_MASK; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); unsigned long flags; spin_lock_irqsave(&qp->state_lock, flags); if ((is_request && (qp_state(qp) < IB_QPS_RTS)) || (!is_request && (qp_state(qp) < IB_QPS_RTR))) { spin_unlock_irqrestore(&qp->state_lock, flags); rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n"); goto drop; } spin_unlock_irqrestore(&qp->state_lock, flags); rxe_icrc_generate(skb, pkt); if (pkt->mask & RXE_LOOPBACK_MASK) err = rxe_loopback(skb, pkt); else err = rxe_send(skb, pkt); if (err) { rxe_counter_inc(rxe, RXE_CNT_SEND_ERR); return err; } rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS); goto done; drop: kfree_skb(skb); err = 0; done: return err; } struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, int paylen, struct rxe_pkt_info *pkt) { unsigned int hdr_len; struct sk_buff *skb = NULL; struct net_device *ndev; const struct ib_gid_attr *attr; const int port_num = 1; attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index); if (IS_ERR(attr)) return NULL; if (av->network_type == RXE_NETWORK_TYPE_IPV4) hdr_len = ETH_HLEN + sizeof(struct udphdr) + sizeof(struct iphdr); else hdr_len = ETH_HLEN + sizeof(struct udphdr) + sizeof(struct ipv6hdr); rcu_read_lock(); ndev = rdma_read_gid_attr_ndev_rcu(attr); if (IS_ERR(ndev)) { rcu_read_unlock(); goto out; } skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev), GFP_ATOMIC); if (unlikely(!skb)) { rcu_read_unlock(); goto out; } /* Add time stamp to skb. */ skb->tstamp = ktime_get(); skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev)); /* FIXME: hold reference to this netdev until life of this skb. 
*/ skb->dev = ndev; rcu_read_unlock(); if (av->network_type == RXE_NETWORK_TYPE_IPV4) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); pkt->rxe = rxe; pkt->port_num = port_num; pkt->hdr = skb_put(skb, paylen); pkt->mask |= RXE_GRH_MASK; out: rdma_put_gid_attr(attr); return skb; } /* * this is required by rxe_cfg to match rxe devices in * /sys/class/infiniband up with their underlying ethernet devices */ const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num) { struct net_device *ndev; char *ndev_name; ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); if (!ndev) return NULL; ndev_name = ndev->name; dev_put(ndev); return ndev_name; } int rxe_net_add(const char *ibdev_name, struct net_device *ndev) { int err; struct rxe_dev *rxe = NULL; rxe = ib_alloc_device(rxe_dev, ib_dev); if (!rxe) return -ENOMEM; ib_mark_name_assigned_by_user(&rxe->ib_dev); err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev); if (err) { ib_dealloc_device(&rxe->ib_dev); return err; } return 0; } static void rxe_port_event(struct rxe_dev *rxe, enum ib_event_type event) { struct ib_event ev; ev.device = &rxe->ib_dev; ev.element.port_num = 1; ev.event = event; ib_dispatch_event(&ev); } /* Caller must hold net_info_lock */ void rxe_port_up(struct rxe_dev *rxe) { rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE); dev_info(&rxe->ib_dev.dev, "set active\n"); } /* Caller must hold net_info_lock */ void rxe_port_down(struct rxe_dev *rxe) { rxe_port_event(rxe, IB_EVENT_PORT_ERR); rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED); dev_info(&rxe->ib_dev.dev, "set down\n"); } void rxe_set_port_state(struct rxe_dev *rxe) { struct net_device *ndev; ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); if (!ndev) return; if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE) rxe_port_up(rxe); else rxe_port_down(rxe); dev_put(ndev); } static int rxe_notify(struct notifier_block *not_blk, unsigned long event, void *arg) { struct net_device *ndev = netdev_notifier_info_to_dev(arg); struct rxe_dev *rxe = rxe_get_dev_from_net(ndev); if (!rxe) return NOTIFY_OK; switch (event) { case NETDEV_UNREGISTER: ib_unregister_device_queued(&rxe->ib_dev); break; case NETDEV_CHANGEMTU: rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu); rxe_set_mtu(rxe, ndev->mtu); break; case NETDEV_DOWN: case NETDEV_CHANGE: if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN) rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED); break; case NETDEV_REBOOT: case NETDEV_GOING_DOWN: case NETDEV_CHANGEADDR: case NETDEV_CHANGENAME: case NETDEV_FEAT_CHANGE: default: rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n", event, ndev->name); break; } ib_device_put(&rxe->ib_dev); return NOTIFY_OK; } static struct notifier_block rxe_net_notifier = { .notifier_call = rxe_notify, }; static int rxe_net_ipv4_init(void) { recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net, htons(ROCE_V2_UDP_DPORT), false); if (IS_ERR(recv_sockets.sk4)) { recv_sockets.sk4 = NULL; pr_err("Failed to create IPv4 UDP tunnel\n"); return -1; } return 0; } static int rxe_net_ipv6_init(void) { #if IS_ENABLED(CONFIG_IPV6) recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net, htons(ROCE_V2_UDP_DPORT), true); if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) { recv_sockets.sk6 = NULL; pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n"); return 0; } if (IS_ERR(recv_sockets.sk6)) { recv_sockets.sk6 = NULL; pr_err("Failed to create IPv6 UDP tunnel\n"); return -1; } #endif return 0; } void rxe_net_exit(void) { rxe_release_udp_tunnel(recv_sockets.sk6); rxe_release_udp_tunnel(recv_sockets.sk4); 
unregister_netdevice_notifier(&rxe_net_notifier); } int rxe_net_init(void) { int err; recv_sockets.sk6 = NULL; err = rxe_net_ipv4_init(); if (err) return err; err = rxe_net_ipv6_init(); if (err) goto err_out; err = register_netdevice_notifier(&rxe_net_notifier); if (err) { pr_err("Failed to register netdev notifier\n"); goto err_out; } return 0; err_out: rxe_net_exit(); return err; }
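rxe_skb_tx_dtor() above implements a simple transmit flow-control scheme: rxe_send() and rxe_loopback() bump qp->skb_out for every packet handed to the stack, the skb destructor drops the count, and once it falls back below RXE_INFLIGHT_SKBS_PER_QP_LOW the stalled send task is rescheduled. The following is a userspace sketch of that counter-plus-low-watermark pattern using C11 atomics; the threshold value and the "reschedule" step are stand-ins for the driver's RXE_INFLIGHT_SKBS_PER_QP_LOW and rxe_sched_task():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define INFLIGHT_LOW 4		/* stand-in for RXE_INFLIGHT_SKBS_PER_QP_LOW */

struct fake_qp {
	atomic_int skb_out;	/* packets handed to the stack, not yet freed */
	bool need_req_skb;	/* sender is stalled waiting for headroom */
};

/* models rxe_send(): account for one more in-flight packet */
static void xmit_one(struct fake_qp *qp)
{
	atomic_fetch_add(&qp->skb_out, 1);
}

/* models rxe_skb_tx_dtor(): called when the stack releases the buffer */
static void tx_done(struct fake_qp *qp)
{
	int out = atomic_fetch_sub(&qp->skb_out, 1) - 1;

	if (qp->need_req_skb && out < INFLIGHT_LOW)
		printf("below watermark (%d in flight), reschedule sender\n", out);
}

int main(void)
{
	struct fake_qp qp = { .skb_out = 0, .need_req_skb = true };
	int i;

	for (i = 0; i < 6; i++)
		xmit_one(&qp);
	for (i = 0; i < 6; i++)
		tx_done(&qp);
	return 0;
}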
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM sunrpc #if !defined(_TRACE_SUNRPC_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SUNRPC_H #include <linux/sunrpc/sched.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/xprtsock.h> #include <linux/sunrpc/svc_xprt.h> #include <net/tcp_states.h> #include <linux/net.h> #include <linux/tracepoint.h> #include <trace/misc/sunrpc.h> TRACE_DEFINE_ENUM(SOCK_STREAM); TRACE_DEFINE_ENUM(SOCK_DGRAM); TRACE_DEFINE_ENUM(SOCK_RAW); TRACE_DEFINE_ENUM(SOCK_RDM); TRACE_DEFINE_ENUM(SOCK_SEQPACKET); TRACE_DEFINE_ENUM(SOCK_PACKET); #define show_socket_type(type) \ __print_symbolic(type, \ { SOCK_STREAM, "STREAM" }, \ { SOCK_DGRAM, "DGRAM" }, \ { SOCK_RAW, "RAW" }, \ { SOCK_RDM, "RDM" }, \ { SOCK_SEQPACKET, "SEQPACKET" }, \ { SOCK_PACKET, "PACKET" }) /* This list is known to be incomplete, add new enums as needed. */ TRACE_DEFINE_ENUM(AF_UNSPEC); TRACE_DEFINE_ENUM(AF_UNIX); TRACE_DEFINE_ENUM(AF_LOCAL); TRACE_DEFINE_ENUM(AF_INET); TRACE_DEFINE_ENUM(AF_INET6); #define rpc_show_address_family(family) \ __print_symbolic(family, \ { AF_UNSPEC, "AF_UNSPEC" }, \ { AF_UNIX, "AF_UNIX" }, \ { AF_LOCAL, "AF_LOCAL" }, \ { AF_INET, "AF_INET" }, \ { AF_INET6, "AF_INET6" }) DECLARE_EVENT_CLASS(rpc_xdr_buf_class, TP_PROTO( const struct rpc_task *task, const struct xdr_buf *xdr ), TP_ARGS(task, xdr), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(const void *, head_base) __field(size_t, head_len) __field(const void *, tail_base) __field(size_t, tail_len) __field(unsigned int, page_base) __field(unsigned int, page_len) __field(unsigned int, msg_len) ), TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client ?
task->tk_client->cl_clid : -1; __entry->head_base = xdr->head[0].iov_base; __entry->head_len = xdr->head[0].iov_len; __entry->tail_base = xdr->tail[0].iov_base; __entry->tail_len = xdr->tail[0].iov_len; __entry->page_base = xdr->page_base; __entry->page_len = xdr->page_len; __entry->msg_len = xdr->len; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " head=[%p,%zu] page=%u(%u) tail=[%p,%zu] len=%u", __entry->task_id, __entry->client_id, __entry->head_base, __entry->head_len, __entry->page_len, __entry->page_base, __entry->tail_base, __entry->tail_len, __entry->msg_len ) ); #define DEFINE_RPCXDRBUF_EVENT(name) \ DEFINE_EVENT(rpc_xdr_buf_class, \ rpc_xdr_##name, \ TP_PROTO( \ const struct rpc_task *task, \ const struct xdr_buf *xdr \ ), \ TP_ARGS(task, xdr)) DEFINE_RPCXDRBUF_EVENT(sendto); DEFINE_RPCXDRBUF_EVENT(recvfrom); DEFINE_RPCXDRBUF_EVENT(reply_pages); DECLARE_EVENT_CLASS(rpc_clnt_class, TP_PROTO( const struct rpc_clnt *clnt ), TP_ARGS(clnt), TP_STRUCT__entry( __field(unsigned int, client_id) ), TP_fast_assign( __entry->client_id = clnt->cl_clid; ), TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER, __entry->client_id) ); #define DEFINE_RPC_CLNT_EVENT(name) \ DEFINE_EVENT(rpc_clnt_class, \ rpc_clnt_##name, \ TP_PROTO( \ const struct rpc_clnt *clnt \ ), \ TP_ARGS(clnt)) DEFINE_RPC_CLNT_EVENT(free); DEFINE_RPC_CLNT_EVENT(killall); DEFINE_RPC_CLNT_EVENT(shutdown); DEFINE_RPC_CLNT_EVENT(release); DEFINE_RPC_CLNT_EVENT(replace_xprt); DEFINE_RPC_CLNT_EVENT(replace_xprt_err); TRACE_DEFINE_ENUM(RPC_XPRTSEC_NONE); TRACE_DEFINE_ENUM(RPC_XPRTSEC_TLS_X509); #define rpc_show_xprtsec_policy(policy) \ __print_symbolic(policy, \ { RPC_XPRTSEC_NONE, "none" }, \ { RPC_XPRTSEC_TLS_ANON, "tls-anon" }, \ { RPC_XPRTSEC_TLS_X509, "tls-x509" }) #define rpc_show_create_flags(flags) \ __print_flags(flags, "|", \ { RPC_CLNT_CREATE_HARDRTRY, "HARDRTRY" }, \ { RPC_CLNT_CREATE_AUTOBIND, "AUTOBIND" }, \ { RPC_CLNT_CREATE_NONPRIVPORT, "NONPRIVPORT" }, \ { RPC_CLNT_CREATE_NOPING, "NOPING" }, \ { RPC_CLNT_CREATE_DISCRTRY, "DISCRTRY" }, \ { RPC_CLNT_CREATE_QUIET, "QUIET" }, \ { RPC_CLNT_CREATE_INFINITE_SLOTS, \ "INFINITE_SLOTS" }, \ { RPC_CLNT_CREATE_NO_IDLE_TIMEOUT, \ "NO_IDLE_TIMEOUT" }, \ { RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT, \ "NO_RETRANS_TIMEOUT" }, \ { RPC_CLNT_CREATE_SOFTERR, "SOFTERR" }, \ { RPC_CLNT_CREATE_REUSEPORT, "REUSEPORT" }) TRACE_EVENT(rpc_clnt_new, TP_PROTO( const struct rpc_clnt *clnt, const struct rpc_xprt *xprt, const struct rpc_create_args *args ), TP_ARGS(clnt, xprt, args), TP_STRUCT__entry( __field(unsigned int, client_id) __field(unsigned long, xprtsec) __field(unsigned long, flags) __string(program, clnt->cl_program->name) __string(server, xprt->servername) __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) ), TP_fast_assign( __entry->client_id = clnt->cl_clid; __entry->xprtsec = args->xprtsec.policy; __entry->flags = args->flags; __assign_str(program); __assign_str(server); __assign_str(addr); __assign_str(port); ), TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " peer=[%s]:%s" " program=%s server=%s xprtsec=%s flags=%s", __entry->client_id, __get_str(addr), __get_str(port), __get_str(program), __get_str(server), rpc_show_xprtsec_policy(__entry->xprtsec), rpc_show_create_flags(__entry->flags) ) ); TRACE_EVENT(rpc_clnt_new_err, TP_PROTO( const char *program, const char *server, int error ), TP_ARGS(program, server, error), TP_STRUCT__entry( __field(int, error) __string(program, program) __string(server, server) ), TP_fast_assign( 
__entry->error = error; __assign_str(program); __assign_str(server); ), TP_printk("program=%s server=%s error=%d", __get_str(program), __get_str(server), __entry->error) ); TRACE_EVENT(rpc_clnt_clone_err, TP_PROTO( const struct rpc_clnt *clnt, int error ), TP_ARGS(clnt, error), TP_STRUCT__entry( __field(unsigned int, client_id) __field(int, error) ), TP_fast_assign( __entry->client_id = clnt->cl_clid; __entry->error = error; ), TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " error=%d", __entry->client_id, __entry->error) ); TRACE_DEFINE_ENUM(RPC_AUTH_OK); TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED); TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDCRED); TRACE_DEFINE_ENUM(RPC_AUTH_BADVERF); TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDVERF); TRACE_DEFINE_ENUM(RPC_AUTH_TOOWEAK); TRACE_DEFINE_ENUM(RPCSEC_GSS_CREDPROBLEM); TRACE_DEFINE_ENUM(RPCSEC_GSS_CTXPROBLEM); #define rpc_show_auth_stat(status) \ __print_symbolic(status, \ { RPC_AUTH_OK, "AUTH_OK" }, \ { RPC_AUTH_BADCRED, "BADCRED" }, \ { RPC_AUTH_REJECTEDCRED, "REJECTEDCRED" }, \ { RPC_AUTH_BADVERF, "BADVERF" }, \ { RPC_AUTH_REJECTEDVERF, "REJECTEDVERF" }, \ { RPC_AUTH_TOOWEAK, "TOOWEAK" }, \ { RPCSEC_GSS_CREDPROBLEM, "GSS_CREDPROBLEM" }, \ { RPCSEC_GSS_CTXPROBLEM, "GSS_CTXPROBLEM" }) \ DECLARE_EVENT_CLASS(rpc_task_status, TP_PROTO(const struct rpc_task *task), TP_ARGS(task), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, status) ), TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->status = task->tk_status; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d", __entry->task_id, __entry->client_id, __entry->status) ); #define DEFINE_RPC_STATUS_EVENT(name) \ DEFINE_EVENT(rpc_task_status, rpc_##name##_status, \ TP_PROTO( \ const struct rpc_task *task \ ), \ TP_ARGS(task)) DEFINE_RPC_STATUS_EVENT(call); DEFINE_RPC_STATUS_EVENT(connect); DEFINE_RPC_STATUS_EVENT(timeout); DEFINE_RPC_STATUS_EVENT(retry_refresh); DEFINE_RPC_STATUS_EVENT(refresh); TRACE_EVENT(rpc_request, TP_PROTO(const struct rpc_task *task), TP_ARGS(task), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, version) __field(bool, async) __string(progname, task->tk_client->cl_program->name) __string(procname, rpc_proc_name(task)) ), TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->version = task->tk_client->cl_vers; __entry->async = RPC_IS_ASYNC(task); __assign_str(progname); __assign_str(procname); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " %sv%d %s (%ssync)", __entry->task_id, __entry->client_id, __get_str(progname), __entry->version, __get_str(procname), __entry->async ? 
"a": "" ) ); #define rpc_show_task_flags(flags) \ __print_flags(flags, "|", \ { RPC_TASK_ASYNC, "ASYNC" }, \ { RPC_TASK_SWAPPER, "SWAPPER" }, \ { RPC_TASK_MOVEABLE, "MOVEABLE" }, \ { RPC_TASK_NULLCREDS, "NULLCREDS" }, \ { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \ { RPC_TASK_NETUNREACH_FATAL, "NETUNREACH_FATAL"}, \ { RPC_TASK_DYNAMIC, "DYNAMIC" }, \ { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \ { RPC_TASK_SOFT, "SOFT" }, \ { RPC_TASK_SOFTCONN, "SOFTCONN" }, \ { RPC_TASK_SENT, "SENT" }, \ { RPC_TASK_TIMEOUT, "TIMEOUT" }, \ { RPC_TASK_NOCONNECT, "NOCONNECT" }, \ { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }, \ { RPC_TASK_CRED_NOREF, "CRED_NOREF" }) #define rpc_show_runstate(flags) \ __print_flags(flags, "|", \ { (1UL << RPC_TASK_RUNNING), "RUNNING" }, \ { (1UL << RPC_TASK_QUEUED), "QUEUED" }, \ { (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \ { (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \ { (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \ { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }) DECLARE_EVENT_CLASS(rpc_task_running, TP_PROTO(const struct rpc_task *task, const void *action), TP_ARGS(task, action), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(const void *, action) __field(unsigned long, runstate) __field(int, status) __field(unsigned short, flags) ), TP_fast_assign( __entry->client_id = task->tk_client ? task->tk_client->cl_clid : -1; __entry->task_id = task->tk_pid; __entry->action = action; __entry->runstate = task->tk_runstate; __entry->status = task->tk_status; __entry->flags = task->tk_flags; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " flags=%s runstate=%s status=%d action=%ps", __entry->task_id, __entry->client_id, rpc_show_task_flags(__entry->flags), rpc_show_runstate(__entry->runstate), __entry->status, __entry->action ) ); #define DEFINE_RPC_RUNNING_EVENT(name) \ DEFINE_EVENT(rpc_task_running, rpc_task_##name, \ TP_PROTO( \ const struct rpc_task *task, \ const void *action \ ), \ TP_ARGS(task, action)) DEFINE_RPC_RUNNING_EVENT(begin); DEFINE_RPC_RUNNING_EVENT(run_action); DEFINE_RPC_RUNNING_EVENT(sync_sleep); DEFINE_RPC_RUNNING_EVENT(sync_wake); DEFINE_RPC_RUNNING_EVENT(complete); DEFINE_RPC_RUNNING_EVENT(timeout); DEFINE_RPC_RUNNING_EVENT(signalled); DEFINE_RPC_RUNNING_EVENT(end); DEFINE_RPC_RUNNING_EVENT(call_done); DECLARE_EVENT_CLASS(rpc_task_queued, TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q), TP_ARGS(task, q), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(unsigned long, timeout) __field(unsigned long, runstate) __field(int, status) __field(unsigned short, flags) __string(q_name, rpc_qname(q)) ), TP_fast_assign( __entry->client_id = task->tk_client ? 
task->tk_client->cl_clid : -1; __entry->task_id = task->tk_pid; __entry->timeout = rpc_task_timeout(task); __entry->runstate = task->tk_runstate; __entry->status = task->tk_status; __entry->flags = task->tk_flags; __assign_str(q_name); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " flags=%s runstate=%s status=%d timeout=%lu queue=%s", __entry->task_id, __entry->client_id, rpc_show_task_flags(__entry->flags), rpc_show_runstate(__entry->runstate), __entry->status, __entry->timeout, __get_str(q_name) ) ); #define DEFINE_RPC_QUEUED_EVENT(name) \ DEFINE_EVENT(rpc_task_queued, rpc_task_##name, \ TP_PROTO( \ const struct rpc_task *task, \ const struct rpc_wait_queue *q \ ), \ TP_ARGS(task, q)) DEFINE_RPC_QUEUED_EVENT(sleep); DEFINE_RPC_QUEUED_EVENT(wakeup); DECLARE_EVENT_CLASS(rpc_failure, TP_PROTO(const struct rpc_task *task), TP_ARGS(task), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) ), TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER, __entry->task_id, __entry->client_id) ); #define DEFINE_RPC_FAILURE(name) \ DEFINE_EVENT(rpc_failure, rpc_bad_##name, \ TP_PROTO( \ const struct rpc_task *task \ ), \ TP_ARGS(task)) DEFINE_RPC_FAILURE(callhdr); DEFINE_RPC_FAILURE(verifier); DECLARE_EVENT_CLASS(rpc_reply_event, TP_PROTO( const struct rpc_task *task ), TP_ARGS(task), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(u32, xid) __string(progname, task->tk_client->cl_program->name) __field(u32, version) __string(procname, rpc_proc_name(task)) __string(servername, task->tk_xprt->servername) ), TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); __assign_str(progname); __entry->version = task->tk_client->cl_vers; __assign_str(procname); __assign_str(servername); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " server=%s xid=0x%08x %sv%d %s", __entry->task_id, __entry->client_id, __get_str(servername), __entry->xid, __get_str(progname), __entry->version, __get_str(procname)) ) #define DEFINE_RPC_REPLY_EVENT(name) \ DEFINE_EVENT(rpc_reply_event, rpc__##name, \ TP_PROTO( \ const struct rpc_task *task \ ), \ TP_ARGS(task)) DEFINE_RPC_REPLY_EVENT(prog_unavail); DEFINE_RPC_REPLY_EVENT(prog_mismatch); DEFINE_RPC_REPLY_EVENT(proc_unavail); DEFINE_RPC_REPLY_EVENT(garbage_args); DEFINE_RPC_REPLY_EVENT(unparsable); DEFINE_RPC_REPLY_EVENT(mismatch); DEFINE_RPC_REPLY_EVENT(stale_creds); DEFINE_RPC_REPLY_EVENT(bad_creds); DEFINE_RPC_REPLY_EVENT(auth_tooweak); #define DEFINE_RPCB_ERROR_EVENT(name) \ DEFINE_EVENT(rpc_reply_event, rpcb_##name##_err, \ TP_PROTO( \ const struct rpc_task *task \ ), \ TP_ARGS(task)) DEFINE_RPCB_ERROR_EVENT(prog_unavail); DEFINE_RPCB_ERROR_EVENT(timeout); DEFINE_RPCB_ERROR_EVENT(bind_version); DEFINE_RPCB_ERROR_EVENT(unreachable); DEFINE_RPCB_ERROR_EVENT(unrecognized); TRACE_EVENT(rpc_buf_alloc, TP_PROTO( const struct rpc_task *task, int status ), TP_ARGS(task, status), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(size_t, callsize) __field(size_t, recvsize) __field(int, status) ), TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->callsize = task->tk_rqstp->rq_callsize; __entry->recvsize = task->tk_rqstp->rq_rcvsize; __entry->status = status; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " callsize=%zu recvsize=%zu status=%d", __entry->task_id, 
__entry->client_id, __entry->callsize, __entry->recvsize, __entry->status ) ); TRACE_EVENT(rpc_call_rpcerror, TP_PROTO( const struct rpc_task *task, int tk_status, int rpc_status ), TP_ARGS(task, tk_status, rpc_status), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, tk_status) __field(int, rpc_status) ), TP_fast_assign( __entry->client_id = task->tk_client->cl_clid; __entry->task_id = task->tk_pid; __entry->tk_status = tk_status; __entry->rpc_status = rpc_status; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " tk_status=%d rpc_status=%d", __entry->task_id, __entry->client_id, __entry->tk_status, __entry->rpc_status) ); TRACE_EVENT(rpc_stats_latency, TP_PROTO( const struct rpc_task *task, ktime_t backlog, ktime_t rtt, ktime_t execute ), TP_ARGS(task, backlog, rtt, execute), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(u32, xid) __field(int, version) __string(progname, task->tk_client->cl_program->name) __string(procname, rpc_proc_name(task)) __field(unsigned long, backlog) __field(unsigned long, rtt) __field(unsigned long, execute) __field(u32, xprt_id) ), TP_fast_assign( __entry->client_id = task->tk_client->cl_clid; __entry->task_id = task->tk_pid; __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); __entry->version = task->tk_client->cl_vers; __assign_str(progname); __assign_str(procname); __entry->backlog = ktime_to_us(backlog); __entry->rtt = ktime_to_us(rtt); __entry->execute = ktime_to_us(execute); __entry->xprt_id = task->tk_xprt->id; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu" " xprt_id=%d", __entry->task_id, __entry->client_id, __entry->xid, __get_str(progname), __entry->version, __get_str(procname), __entry->backlog, __entry->rtt, __entry->execute, __entry->xprt_id) ); TRACE_EVENT(rpc_xdr_overflow, TP_PROTO( const struct xdr_stream *xdr, size_t requested ), TP_ARGS(xdr, requested), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, version) __field(size_t, requested) __field(const void *, end) __field(const void *, p) __field(const void *, head_base) __field(size_t, head_len) __field(const void *, tail_base) __field(size_t, tail_len) __field(unsigned int, page_len) __field(unsigned int, len) __string(progname, xdr->rqst ? xdr->rqst->rq_task->tk_client->cl_program->name : "unknown") __string(procedure, xdr->rqst ? 
xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown") ), TP_fast_assign( if (xdr->rqst) { const struct rpc_task *task = xdr->rqst->rq_task; __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __assign_str(progname); __entry->version = task->tk_client->cl_vers; __assign_str(procedure); } else { __entry->task_id = -1; __entry->client_id = -1; __assign_str(progname); __entry->version = 0; __assign_str(procedure); } __entry->requested = requested; __entry->end = xdr->end; __entry->p = xdr->p; __entry->head_base = xdr->buf->head[0].iov_base, __entry->head_len = xdr->buf->head[0].iov_len, __entry->page_len = xdr->buf->page_len, __entry->tail_base = xdr->buf->tail[0].iov_base, __entry->tail_len = xdr->buf->tail[0].iov_len, __entry->len = xdr->buf->len; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u", __entry->task_id, __entry->client_id, __get_str(progname), __entry->version, __get_str(procedure), __entry->requested, __entry->p, __entry->end, __entry->head_base, __entry->head_len, __entry->page_len, __entry->tail_base, __entry->tail_len, __entry->len ) ); TRACE_EVENT(rpc_xdr_alignment, TP_PROTO( const struct xdr_stream *xdr, size_t offset, unsigned int copied ), TP_ARGS(xdr, offset, copied), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, version) __field(size_t, offset) __field(unsigned int, copied) __field(const void *, head_base) __field(size_t, head_len) __field(const void *, tail_base) __field(size_t, tail_len) __field(unsigned int, page_len) __field(unsigned int, len) __string(progname, xdr->rqst->rq_task->tk_client->cl_program->name) __string(procedure, xdr->rqst->rq_task->tk_msg.rpc_proc->p_name) ), TP_fast_assign( const struct rpc_task *task = xdr->rqst->rq_task; __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __assign_str(progname); __entry->version = task->tk_client->cl_vers; __assign_str(procedure); __entry->offset = offset; __entry->copied = copied; __entry->head_base = xdr->buf->head[0].iov_base, __entry->head_len = xdr->buf->head[0].iov_len, __entry->page_len = xdr->buf->page_len, __entry->tail_base = xdr->buf->tail[0].iov_base, __entry->tail_len = xdr->buf->tail[0].iov_len, __entry->len = xdr->buf->len; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u", __entry->task_id, __entry->client_id, __get_str(progname), __entry->version, __get_str(procedure), __entry->offset, __entry->copied, __entry->head_base, __entry->head_len, __entry->page_len, __entry->tail_base, __entry->tail_len, __entry->len ) ); /* * First define the enums in the below macros to be exported to userspace * via TRACE_DEFINE_ENUM(). 
*/ #undef EM #undef EMe #define EM(a, b) TRACE_DEFINE_ENUM(a); #define EMe(a, b) TRACE_DEFINE_ENUM(a); #define RPC_SHOW_SOCKET \ EM( SS_FREE, "FREE" ) \ EM( SS_UNCONNECTED, "UNCONNECTED" ) \ EM( SS_CONNECTING, "CONNECTING" ) \ EM( SS_CONNECTED, "CONNECTED" ) \ EMe( SS_DISCONNECTING, "DISCONNECTING" ) #define rpc_show_socket_state(state) \ __print_symbolic(state, RPC_SHOW_SOCKET) RPC_SHOW_SOCKET #define RPC_SHOW_SOCK \ EM( TCP_ESTABLISHED, "ESTABLISHED" ) \ EM( TCP_SYN_SENT, "SYN_SENT" ) \ EM( TCP_SYN_RECV, "SYN_RECV" ) \ EM( TCP_FIN_WAIT1, "FIN_WAIT1" ) \ EM( TCP_FIN_WAIT2, "FIN_WAIT2" ) \ EM( TCP_TIME_WAIT, "TIME_WAIT" ) \ EM( TCP_CLOSE, "CLOSE" ) \ EM( TCP_CLOSE_WAIT, "CLOSE_WAIT" ) \ EM( TCP_LAST_ACK, "LAST_ACK" ) \ EM( TCP_LISTEN, "LISTEN" ) \ EMe( TCP_CLOSING, "CLOSING" ) #define rpc_show_sock_state(state) \ __print_symbolic(state, RPC_SHOW_SOCK) RPC_SHOW_SOCK #include <trace/events/net_probe_common.h> /* * Now redefine the EM() and EMe() macros to map the enums to the strings * that will be printed in the output. */ #undef EM #undef EMe #define EM(a, b) {a, b}, #define EMe(a, b) {a, b} DECLARE_EVENT_CLASS(xs_socket_event, TP_PROTO( struct rpc_xprt *xprt, struct socket *socket ), TP_ARGS(xprt, socket), TP_STRUCT__entry( __field(unsigned int, socket_state) __field(unsigned int, sock_state) __field(unsigned long long, ino) __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) ), TP_fast_assign( struct inode *inode = SOCK_INODE(socket); const struct sock *sk = socket->sk; const struct inet_sock *inet = inet_sk(sk); memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS(__entry, inet, sk); __entry->socket_state = socket->state; __entry->sock_state = socket->sk->sk_state; __entry->ino = (unsigned long long)inode->i_ino; ), TP_printk( "socket:[%llu] srcaddr=%pISpc dstaddr=%pISpc " "state=%u (%s) sk_state=%u (%s)", __entry->ino, __entry->saddr, __entry->daddr, __entry->socket_state, rpc_show_socket_state(__entry->socket_state), __entry->sock_state, rpc_show_sock_state(__entry->sock_state) ) ); #define DEFINE_RPC_SOCKET_EVENT(name) \ DEFINE_EVENT(xs_socket_event, name, \ TP_PROTO( \ struct rpc_xprt *xprt, \ struct socket *socket \ ), \ TP_ARGS(xprt, socket)) DECLARE_EVENT_CLASS(xs_socket_event_done, TP_PROTO( struct rpc_xprt *xprt, struct socket *socket, int error ), TP_ARGS(xprt, socket, error), TP_STRUCT__entry( __field(int, error) __field(unsigned int, socket_state) __field(unsigned int, sock_state) __field(unsigned long long, ino) __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) ), TP_fast_assign( struct inode *inode = SOCK_INODE(socket); const struct sock *sk = socket->sk; const struct inet_sock *inet = inet_sk(sk); memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS(__entry, inet, sk); __entry->socket_state = socket->state; __entry->sock_state = socket->sk->sk_state; __entry->ino = (unsigned long long)inode->i_ino; __entry->error = error; ), TP_printk( "error=%d socket:[%llu] srcaddr=%pISpc dstaddr=%pISpc " "state=%u (%s) sk_state=%u (%s)", __entry->error, __entry->ino, __entry->saddr, __entry->daddr, __entry->socket_state, rpc_show_socket_state(__entry->socket_state), __entry->sock_state, rpc_show_sock_state(__entry->sock_state) ) ); #define DEFINE_RPC_SOCKET_EVENT_DONE(name) \ DEFINE_EVENT(xs_socket_event_done, name, \ TP_PROTO( \ 
struct rpc_xprt *xprt, \ struct socket *socket, \ int error \ ), \ TP_ARGS(xprt, socket, error)) DEFINE_RPC_SOCKET_EVENT(rpc_socket_state_change); DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_connect); DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_error); DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection); DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); TRACE_EVENT(rpc_socket_nospace, TP_PROTO( const struct rpc_rqst *rqst, const struct sock_xprt *transport ), TP_ARGS(rqst, transport), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(unsigned int, total) __field(unsigned int, remaining) ), TP_fast_assign( __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; __entry->total = rqst->rq_slen; __entry->remaining = rqst->rq_slen - transport->xmit.offset; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " total=%u remaining=%u", __entry->task_id, __entry->client_id, __entry->total, __entry->remaining ) ); #define rpc_show_xprt_state(x) \ __print_flags(x, "|", \ { BIT(XPRT_LOCKED), "LOCKED" }, \ { BIT(XPRT_CONNECTED), "CONNECTED" }, \ { BIT(XPRT_CONNECTING), "CONNECTING" }, \ { BIT(XPRT_CLOSE_WAIT), "CLOSE_WAIT" }, \ { BIT(XPRT_BOUND), "BOUND" }, \ { BIT(XPRT_BINDING), "BINDING" }, \ { BIT(XPRT_CLOSING), "CLOSING" }, \ { BIT(XPRT_OFFLINE), "OFFLINE" }, \ { BIT(XPRT_REMOVE), "REMOVE" }, \ { BIT(XPRT_CONGESTED), "CONGESTED" }, \ { BIT(XPRT_CWND_WAIT), "CWND_WAIT" }, \ { BIT(XPRT_WRITE_SPACE), "WRITE_SPACE" }, \ { BIT(XPRT_SND_IS_COOKIE), "SND_IS_COOKIE" }) DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class, TP_PROTO( const struct rpc_xprt *xprt ), TP_ARGS(xprt), TP_STRUCT__entry( __field(unsigned long, state) __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) ), TP_fast_assign( __entry->state = xprt->state; __assign_str(addr); __assign_str(port); ), TP_printk("peer=[%s]:%s state=%s", __get_str(addr), __get_str(port), rpc_show_xprt_state(__entry->state)) ); #define DEFINE_RPC_XPRT_LIFETIME_EVENT(name) \ DEFINE_EVENT(rpc_xprt_lifetime_class, \ xprt_##name, \ TP_PROTO( \ const struct rpc_xprt *xprt \ ), \ TP_ARGS(xprt)) DEFINE_RPC_XPRT_LIFETIME_EVENT(create); DEFINE_RPC_XPRT_LIFETIME_EVENT(connect); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force); DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy); DECLARE_EVENT_CLASS(rpc_xprt_event, TP_PROTO( const struct rpc_xprt *xprt, __be32 xid, int status ), TP_ARGS(xprt, xid, status), TP_STRUCT__entry( __field(u32, xid) __field(int, status) __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) ), TP_fast_assign( __entry->xid = be32_to_cpu(xid); __entry->status = status; __assign_str(addr); __assign_str(port); ), TP_printk("peer=[%s]:%s xid=0x%08x status=%d", __get_str(addr), __get_str(port), __entry->xid, __entry->status) ); #define DEFINE_RPC_XPRT_EVENT(name) \ DEFINE_EVENT(rpc_xprt_event, xprt_##name, \ TP_PROTO( \ const struct rpc_xprt *xprt, \ __be32 xid, \ int status \ ), \ TP_ARGS(xprt, xid, status)) DEFINE_RPC_XPRT_EVENT(timer); DEFINE_RPC_XPRT_EVENT(lookup_rqst); TRACE_EVENT(xprt_transmit, TP_PROTO( const struct rpc_rqst *rqst, int status ), TP_ARGS(rqst, status), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(u32, xid) __field(u32, seqno) __field(int, status) ), TP_fast_assign( __entry->task_id = 
rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client ? rqst->rq_task->tk_client->cl_clid : -1; __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->seqno = *rqst->rq_seqnos; __entry->status = status; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x seqno=%u status=%d", __entry->task_id, __entry->client_id, __entry->xid, __entry->seqno, __entry->status) ); TRACE_EVENT(xprt_retransmit, TP_PROTO( const struct rpc_rqst *rqst ), TP_ARGS(rqst), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(u32, xid) __field(int, ntrans) __field(int, version) __field(unsigned long, timeout) __string(progname, rqst->rq_task->tk_client->cl_program->name) __string(procname, rpc_proc_name(rqst->rq_task)) ), TP_fast_assign( struct rpc_task *task = rqst->rq_task; __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client ? task->tk_client->cl_clid : -1; __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->ntrans = rqst->rq_ntrans; __entry->timeout = task->tk_timeout; __assign_str(progname); __entry->version = task->tk_client->cl_vers; __assign_str(procname); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x %sv%d %s ntrans=%d timeout=%lu", __entry->task_id, __entry->client_id, __entry->xid, __get_str(progname), __entry->version, __get_str(procname), __entry->ntrans, __entry->timeout ) ); TRACE_EVENT(xprt_ping, TP_PROTO(const struct rpc_xprt *xprt, int status), TP_ARGS(xprt, status), TP_STRUCT__entry( __field(int, status) __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) ), TP_fast_assign( __entry->status = status; __assign_str(addr); __assign_str(port); ), TP_printk("peer=[%s]:%s status=%d", __get_str(addr), __get_str(port), __entry->status) ); DECLARE_EVENT_CLASS(xprt_writelock_event, TP_PROTO( const struct rpc_xprt *xprt, const struct rpc_task *task ), TP_ARGS(xprt, task), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(unsigned int, snd_task_id) ), TP_fast_assign( if (task) { __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client ? task->tk_client->cl_clid : -1; } else { __entry->task_id = -1; __entry->client_id = -1; } if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state)) __entry->snd_task_id = xprt->snd_task->tk_pid; else __entry->snd_task_id = -1; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " snd_task:" SUNRPC_TRACE_PID_SPECIFIER, __entry->task_id, __entry->client_id, __entry->snd_task_id) ); #define DEFINE_WRITELOCK_EVENT(name) \ DEFINE_EVENT(xprt_writelock_event, xprt_##name, \ TP_PROTO( \ const struct rpc_xprt *xprt, \ const struct rpc_task *task \ ), \ TP_ARGS(xprt, task)) DEFINE_WRITELOCK_EVENT(reserve_xprt); DEFINE_WRITELOCK_EVENT(release_xprt); DECLARE_EVENT_CLASS(xprt_cong_event, TP_PROTO( const struct rpc_xprt *xprt, const struct rpc_task *task ), TP_ARGS(xprt, task), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(unsigned int, snd_task_id) __field(unsigned long, cong) __field(unsigned long, cwnd) __field(bool, wait) ), TP_fast_assign( if (task) { __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client ? 
task->tk_client->cl_clid : -1; } else { __entry->task_id = -1; __entry->client_id = -1; } if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state)) __entry->snd_task_id = xprt->snd_task->tk_pid; else __entry->snd_task_id = -1; __entry->cong = xprt->cong; __entry->cwnd = xprt->cwnd; __entry->wait = test_bit(XPRT_CWND_WAIT, &xprt->state); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " snd_task:" SUNRPC_TRACE_PID_SPECIFIER " cong=%lu cwnd=%lu%s", __entry->task_id, __entry->client_id, __entry->snd_task_id, __entry->cong, __entry->cwnd, __entry->wait ? " (wait)" : "") ); #define DEFINE_CONG_EVENT(name) \ DEFINE_EVENT(xprt_cong_event, xprt_##name, \ TP_PROTO( \ const struct rpc_xprt *xprt, \ const struct rpc_task *task \ ), \ TP_ARGS(xprt, task)) DEFINE_CONG_EVENT(reserve_cong); DEFINE_CONG_EVENT(release_cong); DEFINE_CONG_EVENT(get_cong); DEFINE_CONG_EVENT(put_cong); TRACE_EVENT(xprt_reserve, TP_PROTO( const struct rpc_rqst *rqst ), TP_ARGS(rqst), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(u32, xid) ), TP_fast_assign( __entry->task_id = rqst->rq_task->tk_pid; __entry->client_id = rqst->rq_task->tk_client->cl_clid; __entry->xid = be32_to_cpu(rqst->rq_xid); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x", __entry->task_id, __entry->client_id, __entry->xid ) ); TRACE_EVENT(xs_data_ready, TP_PROTO( const struct rpc_xprt *xprt ), TP_ARGS(xprt), TP_STRUCT__entry( __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) ), TP_fast_assign( __assign_str(addr); __assign_str(port); ), TP_printk("peer=[%s]:%s", __get_str(addr), __get_str(port)) ); TRACE_EVENT(xs_stream_read_data, TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total), TP_ARGS(xprt, err, total), TP_STRUCT__entry( __field(ssize_t, err) __field(size_t, total) __string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] : EVENT_NULL_STR) __string(port, xprt ? 
xprt->address_strings[RPC_DISPLAY_PORT] : EVENT_NULL_STR) ), TP_fast_assign( __entry->err = err; __entry->total = total; __assign_str(addr); __assign_str(port); ), TP_printk("peer=[%s]:%s err=%zd total=%zu", __get_str(addr), __get_str(port), __entry->err, __entry->total) ); TRACE_EVENT(xs_stream_read_request, TP_PROTO(struct sock_xprt *xs), TP_ARGS(xs), TP_STRUCT__entry( __string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]) __string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]) __field(u32, xid) __field(unsigned long, copied) __field(unsigned int, reclen) __field(unsigned int, offset) ), TP_fast_assign( __assign_str(addr); __assign_str(port); __entry->xid = be32_to_cpu(xs->recv.xid); __entry->copied = xs->recv.copied; __entry->reclen = xs->recv.len; __entry->offset = xs->recv.offset; ), TP_printk("peer=[%s]:%s xid=0x%08x copied=%lu reclen=%u offset=%u", __get_str(addr), __get_str(port), __entry->xid, __entry->copied, __entry->reclen, __entry->offset) ); TRACE_EVENT(rpcb_getport, TP_PROTO( const struct rpc_clnt *clnt, const struct rpc_task *task, unsigned int bind_version ), TP_ARGS(clnt, task, bind_version), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(unsigned int, program) __field(unsigned int, version) __field(int, protocol) __field(unsigned int, bind_version) __string(servername, task->tk_xprt->servername) ), TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = clnt->cl_clid; __entry->program = clnt->cl_prog; __entry->version = clnt->cl_vers; __entry->protocol = task->tk_xprt->prot; __entry->bind_version = bind_version; __assign_str(servername); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " server=%s program=%u version=%u protocol=%d bind_version=%u", __entry->task_id, __entry->client_id, __get_str(servername), __entry->program, __entry->version, __entry->protocol, __entry->bind_version ) ); TRACE_EVENT(rpcb_setport, TP_PROTO( const struct rpc_task *task, int status, unsigned short port ), TP_ARGS(task, status, port), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(int, status) __field(unsigned short, port) ), TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->status = status; __entry->port = port; ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d port=%u", __entry->task_id, __entry->client_id, __entry->status, __entry->port ) ); TRACE_EVENT(pmap_register, TP_PROTO( u32 program, u32 version, int protocol, unsigned short port ), TP_ARGS(program, version, protocol, port), TP_STRUCT__entry( __field(unsigned int, program) __field(unsigned int, version) __field(int, protocol) __field(unsigned int, port) ), TP_fast_assign( __entry->program = program; __entry->version = version; __entry->protocol = protocol; __entry->port = port; ), TP_printk("program=%u version=%u protocol=%d port=%u", __entry->program, __entry->version, __entry->protocol, __entry->port ) ); TRACE_EVENT(rpcb_register, TP_PROTO( u32 program, u32 version, const char *addr, const char *netid ), TP_ARGS(program, version, addr, netid), TP_STRUCT__entry( __field(unsigned int, program) __field(unsigned int, version) __string(addr, addr) __string(netid, netid) ), TP_fast_assign( __entry->program = program; __entry->version = version; __assign_str(addr); __assign_str(netid); ), TP_printk("program=%u version=%u addr=%s netid=%s", __entry->program, __entry->version, __get_str(addr), __get_str(netid) ) ); TRACE_EVENT(rpcb_unregister, TP_PROTO( u32 program, u32 version, 
const char *netid ), TP_ARGS(program, version, netid), TP_STRUCT__entry( __field(unsigned int, program) __field(unsigned int, version) __string(netid, netid) ), TP_fast_assign( __entry->program = program; __entry->version = version; __assign_str(netid); ), TP_printk("program=%u version=%u netid=%s", __entry->program, __entry->version, __get_str(netid) ) ); /** ** RPC-over-TLS tracepoints **/ DECLARE_EVENT_CLASS(rpc_tls_class, TP_PROTO( const struct rpc_clnt *clnt, const struct rpc_xprt *xprt ), TP_ARGS(clnt, xprt), TP_STRUCT__entry( __field(unsigned long, requested_policy) __field(u32, version) __string(servername, xprt->servername) __string(progname, clnt->cl_program->name) ), TP_fast_assign( __entry->requested_policy = clnt->cl_xprtsec.policy; __entry->version = clnt->cl_vers; __assign_str(servername); __assign_str(progname); ), TP_printk("server=%s %sv%u requested_policy=%s", __get_str(servername), __get_str(progname), __entry->version, rpc_show_xprtsec_policy(__entry->requested_policy) ) ); #define DEFINE_RPC_TLS_EVENT(name) \ DEFINE_EVENT(rpc_tls_class, rpc_tls_##name, \ TP_PROTO( \ const struct rpc_clnt *clnt, \ const struct rpc_xprt *xprt \ ), \ TP_ARGS(clnt, xprt)) DEFINE_RPC_TLS_EVENT(unavailable); DEFINE_RPC_TLS_EVENT(not_started); /* Record an xdr_buf containing a fully-formed RPC message */ DECLARE_EVENT_CLASS(svc_xdr_msg_class, TP_PROTO( const struct xdr_buf *xdr ), TP_ARGS(xdr), TP_STRUCT__entry( __field(u32, xid) __field(const void *, head_base) __field(size_t, head_len) __field(const void *, tail_base) __field(size_t, tail_len) __field(unsigned int, page_len) __field(unsigned int, msg_len) ), TP_fast_assign( __be32 *p = (__be32 *)xdr->head[0].iov_base; __entry->xid = be32_to_cpu(*p); __entry->head_base = p; __entry->head_len = xdr->head[0].iov_len; __entry->tail_base = xdr->tail[0].iov_base; __entry->tail_len = xdr->tail[0].iov_len; __entry->page_len = xdr->page_len; __entry->msg_len = xdr->len; ), TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u", __entry->xid, __entry->head_base, __entry->head_len, __entry->page_len, __entry->tail_base, __entry->tail_len, __entry->msg_len ) ); #define DEFINE_SVCXDRMSG_EVENT(name) \ DEFINE_EVENT(svc_xdr_msg_class, \ svc_xdr_##name, \ TP_PROTO( \ const struct xdr_buf *xdr \ ), \ TP_ARGS(xdr)) DEFINE_SVCXDRMSG_EVENT(recvfrom); /* Record an xdr_buf containing arbitrary data, tagged with an XID */ DECLARE_EVENT_CLASS(svc_xdr_buf_class, TP_PROTO( __be32 xid, const struct xdr_buf *xdr ), TP_ARGS(xid, xdr), TP_STRUCT__entry( __field(u32, xid) __field(const void *, head_base) __field(size_t, head_len) __field(const void *, tail_base) __field(size_t, tail_len) __field(unsigned int, page_base) __field(unsigned int, page_len) __field(unsigned int, msg_len) ), TP_fast_assign( __entry->xid = be32_to_cpu(xid); __entry->head_base = xdr->head[0].iov_base; __entry->head_len = xdr->head[0].iov_len; __entry->tail_base = xdr->tail[0].iov_base; __entry->tail_len = xdr->tail[0].iov_len; __entry->page_base = xdr->page_base; __entry->page_len = xdr->page_len; __entry->msg_len = xdr->len; ), TP_printk("xid=0x%08x head=[%p,%zu] page=%u(%u) tail=[%p,%zu] len=%u", __entry->xid, __entry->head_base, __entry->head_len, __entry->page_len, __entry->page_base, __entry->tail_base, __entry->tail_len, __entry->msg_len ) ); #define DEFINE_SVCXDRBUF_EVENT(name) \ DEFINE_EVENT(svc_xdr_buf_class, \ svc_xdr_##name, \ TP_PROTO( \ __be32 xid, \ const struct xdr_buf *xdr \ ), \ TP_ARGS(xid, xdr)) DEFINE_SVCXDRBUF_EVENT(sendto); /* * from include/linux/sunrpc/svc.h */ 
#define SVC_RQST_FLAG_LIST \ svc_rqst_flag(SECURE) \ svc_rqst_flag(LOCAL) \ svc_rqst_flag(USEDEFERRAL) \ svc_rqst_flag(DROPME) \ svc_rqst_flag(VICTIM) \ svc_rqst_flag_end(DATA) #undef svc_rqst_flag #undef svc_rqst_flag_end #define svc_rqst_flag(x) TRACE_DEFINE_ENUM(RQ_##x); #define svc_rqst_flag_end(x) TRACE_DEFINE_ENUM(RQ_##x); SVC_RQST_FLAG_LIST #undef svc_rqst_flag #undef svc_rqst_flag_end #define svc_rqst_flag(x) { BIT(RQ_##x), #x }, #define svc_rqst_flag_end(x) { BIT(RQ_##x), #x } #define show_rqstp_flags(flags) \ __print_flags(flags, "|", SVC_RQST_FLAG_LIST) TRACE_DEFINE_ENUM(SVC_GARBAGE); TRACE_DEFINE_ENUM(SVC_VALID); TRACE_DEFINE_ENUM(SVC_NEGATIVE); TRACE_DEFINE_ENUM(SVC_OK); TRACE_DEFINE_ENUM(SVC_DROP); TRACE_DEFINE_ENUM(SVC_CLOSE); TRACE_DEFINE_ENUM(SVC_DENIED); TRACE_DEFINE_ENUM(SVC_PENDING); TRACE_DEFINE_ENUM(SVC_COMPLETE); #define show_svc_auth_status(status) \ __print_symbolic(status, \ { SVC_GARBAGE, "SVC_GARBAGE" }, \ { SVC_VALID, "SVC_VALID" }, \ { SVC_NEGATIVE, "SVC_NEGATIVE" }, \ { SVC_OK, "SVC_OK" }, \ { SVC_DROP, "SVC_DROP" }, \ { SVC_CLOSE, "SVC_CLOSE" }, \ { SVC_DENIED, "SVC_DENIED" }, \ { SVC_PENDING, "SVC_PENDING" }, \ { SVC_COMPLETE, "SVC_COMPLETE" }) #define SVC_RQST_ENDPOINT_FIELDS(r) \ __sockaddr(server, (r)->rq_xprt->xpt_locallen) \ __sockaddr(client, (r)->rq_xprt->xpt_remotelen) \ __field(unsigned int, netns_ino) \ __field(u32, xid) #define SVC_RQST_ENDPOINT_ASSIGNMENTS(r) \ do { \ struct svc_xprt *xprt = (r)->rq_xprt; \ __assign_sockaddr(server, &xprt->xpt_local, \ xprt->xpt_locallen); \ __assign_sockaddr(client, &xprt->xpt_remote, \ xprt->xpt_remotelen); \ __entry->netns_ino = xprt->xpt_net->ns.inum; \ __entry->xid = be32_to_cpu((r)->rq_xid); \ } while (0) #define SVC_RQST_ENDPOINT_FORMAT \ "xid=0x%08x server=%pISpc client=%pISpc" #define SVC_RQST_ENDPOINT_VARARGS \ __entry->xid, __get_sockaddr(server), __get_sockaddr(client) TRACE_EVENT_CONDITION(svc_authenticate, TP_PROTO( const struct svc_rqst *rqst, enum svc_auth_status auth_res ), TP_ARGS(rqst, auth_res), TP_CONDITION(auth_res != SVC_OK && auth_res != SVC_COMPLETE), TP_STRUCT__entry( SVC_RQST_ENDPOINT_FIELDS(rqst) __field(unsigned long, svc_status) __field(unsigned long, auth_stat) ), TP_fast_assign( SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); __entry->svc_status = auth_res; __entry->auth_stat = be32_to_cpu(rqst->rq_auth_stat); ), TP_printk(SVC_RQST_ENDPOINT_FORMAT " auth_res=%s auth_stat=%s", SVC_RQST_ENDPOINT_VARARGS, show_svc_auth_status(__entry->svc_status), rpc_show_auth_stat(__entry->auth_stat)) ); TRACE_EVENT(svc_process, TP_PROTO(const struct svc_rqst *rqst, const char *name), TP_ARGS(rqst, name), TP_STRUCT__entry( __field(u32, xid) __field(u32, vers) __field(u32, proc) __string(service, name) __string(procedure, svc_proc_name(rqst)) __string(addr, rqst->rq_xprt ? 
rqst->rq_xprt->xpt_remotebuf : EVENT_NULL_STR) ), TP_fast_assign( __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->vers = rqst->rq_vers; __entry->proc = rqst->rq_proc; __assign_str(service); __assign_str(procedure); __assign_str(addr); ), TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%s", __get_str(addr), __entry->xid, __get_str(service), __entry->vers, __get_str(procedure) ) ); DECLARE_EVENT_CLASS(svc_rqst_event, TP_PROTO( const struct svc_rqst *rqst ), TP_ARGS(rqst), TP_STRUCT__entry( SVC_RQST_ENDPOINT_FIELDS(rqst) __field(unsigned long, flags) ), TP_fast_assign( SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); __entry->flags = rqst->rq_flags; ), TP_printk(SVC_RQST_ENDPOINT_FORMAT " flags=%s", SVC_RQST_ENDPOINT_VARARGS, show_rqstp_flags(__entry->flags)) ); #define DEFINE_SVC_RQST_EVENT(name) \ DEFINE_EVENT(svc_rqst_event, svc_##name, \ TP_PROTO( \ const struct svc_rqst *rqst \ ), \ TP_ARGS(rqst)) DEFINE_SVC_RQST_EVENT(defer); DEFINE_SVC_RQST_EVENT(drop); DECLARE_EVENT_CLASS(svc_rqst_status, TP_PROTO( const struct svc_rqst *rqst, int status ), TP_ARGS(rqst, status), TP_STRUCT__entry( SVC_RQST_ENDPOINT_FIELDS(rqst) __field(int, status) __field(unsigned long, flags) ), TP_fast_assign( SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); __entry->status = status; __entry->flags = rqst->rq_flags; ), TP_printk(SVC_RQST_ENDPOINT_FORMAT " status=%d flags=%s", SVC_RQST_ENDPOINT_VARARGS, __entry->status, show_rqstp_flags(__entry->flags)) ); DEFINE_EVENT(svc_rqst_status, svc_send, TP_PROTO(const struct svc_rqst *rqst, int status), TP_ARGS(rqst, status)); TRACE_EVENT(svc_replace_page_err, TP_PROTO(const struct svc_rqst *rqst), TP_ARGS(rqst), TP_STRUCT__entry( SVC_RQST_ENDPOINT_FIELDS(rqst) __field(const void *, begin) __field(const void *, respages) __field(const void *, nextpage) ), TP_fast_assign( SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); __entry->begin = rqst->rq_pages; __entry->respages = rqst->rq_respages; __entry->nextpage = rqst->rq_next_page; ), TP_printk(SVC_RQST_ENDPOINT_FORMAT " begin=%p respages=%p nextpage=%p", SVC_RQST_ENDPOINT_VARARGS, __entry->begin, __entry->respages, __entry->nextpage) ); TRACE_EVENT(svc_stats_latency, TP_PROTO( const struct svc_rqst *rqst ), TP_ARGS(rqst), TP_STRUCT__entry( SVC_RQST_ENDPOINT_FIELDS(rqst) __field(unsigned long, execute) __string(procedure, svc_proc_name(rqst)) ), TP_fast_assign( SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); __entry->execute = ktime_to_us(ktime_sub(ktime_get(), rqst->rq_stime)); __assign_str(procedure); ), TP_printk(SVC_RQST_ENDPOINT_FORMAT " proc=%s execute-us=%lu", SVC_RQST_ENDPOINT_VARARGS, __get_str(procedure), __entry->execute) ); /* * from include/linux/sunrpc/svc_xprt.h */ #define SVC_XPRT_FLAG_LIST \ svc_xprt_flag(BUSY) \ svc_xprt_flag(CONN) \ svc_xprt_flag(CLOSE) \ svc_xprt_flag(DATA) \ svc_xprt_flag(TEMP) \ svc_xprt_flag(DEAD) \ svc_xprt_flag(CHNGBUF) \ svc_xprt_flag(DEFERRED) \ svc_xprt_flag(OLD) \ svc_xprt_flag(LISTENER) \ svc_xprt_flag(CACHE_AUTH) \ svc_xprt_flag(LOCAL) \ svc_xprt_flag(KILL_TEMP) \ svc_xprt_flag(CONG_CTRL) \ svc_xprt_flag(HANDSHAKE) \ svc_xprt_flag(TLS_SESSION) \ svc_xprt_flag_end(PEER_AUTH) #undef svc_xprt_flag #undef svc_xprt_flag_end #define svc_xprt_flag(x) TRACE_DEFINE_ENUM(XPT_##x); #define svc_xprt_flag_end(x) TRACE_DEFINE_ENUM(XPT_##x); SVC_XPRT_FLAG_LIST #undef svc_xprt_flag #undef svc_xprt_flag_end #define svc_xprt_flag(x) { BIT(XPT_##x), #x }, #define svc_xprt_flag_end(x) { BIT(XPT_##x), #x } #define show_svc_xprt_flags(flags) \ __print_flags(flags, "|", SVC_XPRT_FLAG_LIST) TRACE_EVENT(svc_xprt_create_err, TP_PROTO( const 
char *program, const char *protocol, struct sockaddr *sap, size_t salen, const struct svc_xprt *xprt ), TP_ARGS(program, protocol, sap, salen, xprt), TP_STRUCT__entry( __field(long, error) __string(program, program) __string(protocol, protocol) __sockaddr(addr, salen) ), TP_fast_assign( __entry->error = PTR_ERR(xprt); __assign_str(program); __assign_str(protocol); __assign_sockaddr(addr, sap, salen); ), TP_printk("addr=%pISpc program=%s protocol=%s error=%ld", __get_sockaddr(addr), __get_str(program), __get_str(protocol), __entry->error) ); #define SVC_XPRT_ENDPOINT_FIELDS(x) \ __sockaddr(server, (x)->xpt_locallen) \ __sockaddr(client, (x)->xpt_remotelen) \ __field(unsigned long, flags) \ __field(unsigned int, netns_ino) #define SVC_XPRT_ENDPOINT_ASSIGNMENTS(x) \ do { \ __assign_sockaddr(server, &(x)->xpt_local, \ (x)->xpt_locallen); \ __assign_sockaddr(client, &(x)->xpt_remote, \ (x)->xpt_remotelen); \ __entry->flags = (x)->xpt_flags; \ __entry->netns_ino = (x)->xpt_net->ns.inum; \ } while (0) #define SVC_XPRT_ENDPOINT_FORMAT \ "server=%pISpc client=%pISpc flags=%s" #define SVC_XPRT_ENDPOINT_VARARGS \ __get_sockaddr(server), __get_sockaddr(client), \ show_svc_xprt_flags(__entry->flags) TRACE_EVENT(svc_xprt_enqueue, TP_PROTO( const struct svc_xprt *xprt, unsigned long flags ), TP_ARGS(xprt, flags), TP_STRUCT__entry( SVC_XPRT_ENDPOINT_FIELDS(xprt) ), TP_fast_assign( __assign_sockaddr(server, &xprt->xpt_local, xprt->xpt_locallen); __assign_sockaddr(client, &xprt->xpt_remote, xprt->xpt_remotelen); __entry->flags = flags; __entry->netns_ino = xprt->xpt_net->ns.inum; ), TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS) ); TRACE_EVENT(svc_xprt_dequeue, TP_PROTO( const struct svc_rqst *rqst ), TP_ARGS(rqst), TP_STRUCT__entry( SVC_XPRT_ENDPOINT_FIELDS(rqst->rq_xprt) __field(unsigned long, wakeup) __field(unsigned long, qtime) ), TP_fast_assign( ktime_t ktime = ktime_get(); SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt); __entry->wakeup = ktime_to_us(ktime_sub(ktime, rqst->rq_qtime)); __entry->qtime = ktime_to_us(ktime_sub(ktime, rqst->rq_xprt->xpt_qtime)); ), TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu qtime-us=%lu", SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup, __entry->qtime) ); DECLARE_EVENT_CLASS(svc_xprt_event, TP_PROTO( const struct svc_xprt *xprt ), TP_ARGS(xprt), TP_STRUCT__entry( SVC_XPRT_ENDPOINT_FIELDS(xprt) ), TP_fast_assign( SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt); ), TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS) ); #define DEFINE_SVC_XPRT_EVENT(name) \ DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \ TP_PROTO( \ const struct svc_xprt *xprt \ ), \ TP_ARGS(xprt)) DEFINE_SVC_XPRT_EVENT(no_write_space); DEFINE_SVC_XPRT_EVENT(close); DEFINE_SVC_XPRT_EVENT(detach); DEFINE_SVC_XPRT_EVENT(free); #define DEFINE_SVC_TLS_EVENT(name) \ DEFINE_EVENT(svc_xprt_event, svc_tls_##name, \ TP_PROTO(const struct svc_xprt *xprt), \ TP_ARGS(xprt)) DEFINE_SVC_TLS_EVENT(start); DEFINE_SVC_TLS_EVENT(upcall); DEFINE_SVC_TLS_EVENT(unavailable); DEFINE_SVC_TLS_EVENT(not_started); DEFINE_SVC_TLS_EVENT(timed_out); TRACE_EVENT(svc_xprt_accept, TP_PROTO( const struct svc_xprt *xprt, const char *service ), TP_ARGS(xprt, service), TP_STRUCT__entry( SVC_XPRT_ENDPOINT_FIELDS(xprt) __string(protocol, xprt->xpt_class->xcl_name) __string(service, service) ), TP_fast_assign( SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt); __assign_str(protocol); __assign_str(service); ), TP_printk(SVC_XPRT_ENDPOINT_FORMAT " protocol=%s service=%s", SVC_XPRT_ENDPOINT_VARARGS, __get_str(protocol), __get_str(service) ) ); 
DECLARE_EVENT_CLASS(svc_pool_thread_event, TP_PROTO(const struct svc_pool *pool, pid_t pid), TP_ARGS(pool, pid), TP_STRUCT__entry( __field(unsigned int, pool_id) __field(pid_t, pid) ), TP_fast_assign( __entry->pool_id = pool->sp_id; __entry->pid = pid; ), TP_printk("pool=%u pid=%d", __entry->pool_id, __entry->pid) ); #define DEFINE_SVC_POOL_THREAD_EVENT(name) \ DEFINE_EVENT(svc_pool_thread_event, svc_pool_thread_##name, \ TP_PROTO( \ const struct svc_pool *pool, pid_t pid \ ), \ TP_ARGS(pool, pid)) DEFINE_SVC_POOL_THREAD_EVENT(wake); DEFINE_SVC_POOL_THREAD_EVENT(running); DEFINE_SVC_POOL_THREAD_EVENT(noidle); TRACE_EVENT(svc_alloc_arg_err, TP_PROTO( unsigned int requested, unsigned int allocated ), TP_ARGS(requested, allocated), TP_STRUCT__entry( __field(unsigned int, requested) __field(unsigned int, allocated) ), TP_fast_assign( __entry->requested = requested; __entry->allocated = allocated; ), TP_printk("requested=%u allocated=%u", __entry->requested, __entry->allocated) ); DECLARE_EVENT_CLASS(svc_deferred_event, TP_PROTO( const struct svc_deferred_req *dr ), TP_ARGS(dr), TP_STRUCT__entry( __field(const void *, dr) __field(u32, xid) __sockaddr(addr, dr->addrlen) ), TP_fast_assign( __entry->dr = dr; __entry->xid = be32_to_cpu(*(__be32 *)dr->args); __assign_sockaddr(addr, &dr->addr, dr->addrlen); ), TP_printk("addr=%pISpc dr=%p xid=0x%08x", __get_sockaddr(addr), __entry->dr, __entry->xid) ); #define DEFINE_SVC_DEFERRED_EVENT(name) \ DEFINE_EVENT(svc_deferred_event, svc_defer_##name, \ TP_PROTO( \ const struct svc_deferred_req *dr \ ), \ TP_ARGS(dr)) DEFINE_SVC_DEFERRED_EVENT(drop); DEFINE_SVC_DEFERRED_EVENT(queue); DEFINE_SVC_DEFERRED_EVENT(recv); DECLARE_EVENT_CLASS(svcsock_lifetime_class, TP_PROTO( const void *svsk, const struct socket *socket ), TP_ARGS(svsk, socket), TP_STRUCT__entry( __field(unsigned int, netns_ino) __field(const void *, svsk) __field(const void *, sk) __field(unsigned long, type) __field(unsigned long, family) __field(unsigned long, state) ), TP_fast_assign( struct sock *sk = socket->sk; __entry->netns_ino = sock_net(sk)->ns.inum; __entry->svsk = svsk; __entry->sk = sk; __entry->type = socket->type; __entry->family = sk->sk_family; __entry->state = sk->sk_state; ), TP_printk("svsk=%p type=%s family=%s%s", __entry->svsk, show_socket_type(__entry->type), rpc_show_address_family(__entry->family), __entry->state == TCP_LISTEN ? " (listener)" : "" ) ); #define DEFINE_SVCSOCK_LIFETIME_EVENT(name) \ DEFINE_EVENT(svcsock_lifetime_class, name, \ TP_PROTO( \ const void *svsk, \ const struct socket *socket \ ), \ TP_ARGS(svsk, socket)) DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_new); DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_free); TRACE_EVENT(svcsock_marker, TP_PROTO( const struct svc_xprt *xprt, __be32 marker ), TP_ARGS(xprt, marker), TP_STRUCT__entry( __field(unsigned int, length) __field(bool, last) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( __entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK; __entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT; __assign_str(addr); ), TP_printk("addr=%s length=%u%s", __get_str(addr), __entry->length, __entry->last ? 
" (last)" : "") ); DECLARE_EVENT_CLASS(svcsock_class, TP_PROTO( const struct svc_xprt *xprt, ssize_t result ), TP_ARGS(xprt, result), TP_STRUCT__entry( __field(ssize_t, result) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( __entry->result = result; __entry->flags = xprt->xpt_flags; __assign_str(addr); ), TP_printk("addr=%s result=%zd flags=%s", __get_str(addr), __entry->result, show_svc_xprt_flags(__entry->flags) ) ); #define DEFINE_SVCSOCK_EVENT(name) \ DEFINE_EVENT(svcsock_class, svcsock_##name, \ TP_PROTO( \ const struct svc_xprt *xprt, \ ssize_t result \ ), \ TP_ARGS(xprt, result)) DEFINE_SVCSOCK_EVENT(udp_send); DEFINE_SVCSOCK_EVENT(udp_recv); DEFINE_SVCSOCK_EVENT(udp_recv_err); DEFINE_SVCSOCK_EVENT(tcp_send); DEFINE_SVCSOCK_EVENT(tcp_recv); DEFINE_SVCSOCK_EVENT(tcp_recv_eagain); DEFINE_SVCSOCK_EVENT(tcp_recv_err); DEFINE_SVCSOCK_EVENT(data_ready); DEFINE_SVCSOCK_EVENT(write_space); TRACE_EVENT(svcsock_tcp_recv_short, TP_PROTO( const struct svc_xprt *xprt, u32 expected, u32 received ), TP_ARGS(xprt, expected, received), TP_STRUCT__entry( __field(u32, expected) __field(u32, received) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( __entry->expected = expected; __entry->received = received; __entry->flags = xprt->xpt_flags; __assign_str(addr); ), TP_printk("addr=%s flags=%s expected=%u received=%u", __get_str(addr), show_svc_xprt_flags(__entry->flags), __entry->expected, __entry->received ) ); TRACE_EVENT(svcsock_tcp_state, TP_PROTO( const struct svc_xprt *xprt, const struct socket *socket ), TP_ARGS(xprt, socket), TP_STRUCT__entry( __field(unsigned long, socket_state) __field(unsigned long, sock_state) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( __entry->socket_state = socket->state; __entry->sock_state = socket->sk->sk_state; __entry->flags = xprt->xpt_flags; __assign_str(addr); ), TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr), rpc_show_socket_state(__entry->socket_state), rpc_show_sock_state(__entry->sock_state), show_svc_xprt_flags(__entry->flags) ) ); DECLARE_EVENT_CLASS(svcsock_accept_class, TP_PROTO( const struct svc_xprt *xprt, const char *service, long status ), TP_ARGS(xprt, service, status), TP_STRUCT__entry( __field(long, status) __string(service, service) __field(unsigned int, netns_ino) ), TP_fast_assign( __entry->status = status; __assign_str(service); __entry->netns_ino = xprt->xpt_net->ns.inum; ), TP_printk("addr=listener service=%s status=%ld", __get_str(service), __entry->status ) ); #define DEFINE_ACCEPT_EVENT(name) \ DEFINE_EVENT(svcsock_accept_class, svcsock_##name##_err, \ TP_PROTO( \ const struct svc_xprt *xprt, \ const char *service, \ long status \ ), \ TP_ARGS(xprt, service, status)) DEFINE_ACCEPT_EVENT(accept); DEFINE_ACCEPT_EVENT(getpeername); DECLARE_EVENT_CLASS(cache_event, TP_PROTO( const struct cache_detail *cd, const struct cache_head *h ), TP_ARGS(cd, h), TP_STRUCT__entry( __field(const struct cache_head *, h) __string(name, cd->name) ), TP_fast_assign( __entry->h = h; __assign_str(name); ), TP_printk("cache=%s entry=%p", __get_str(name), __entry->h) ); #define DEFINE_CACHE_EVENT(name) \ DEFINE_EVENT(cache_event, name, \ TP_PROTO( \ const struct cache_detail *cd, \ const struct cache_head *h \ ), \ TP_ARGS(cd, h)) DEFINE_CACHE_EVENT(cache_entry_expired); DEFINE_CACHE_EVENT(cache_entry_upcall); DEFINE_CACHE_EVENT(cache_entry_update); DEFINE_CACHE_EVENT(cache_entry_make_negative); 
DEFINE_CACHE_EVENT(cache_entry_no_listener); DECLARE_EVENT_CLASS(register_class, TP_PROTO( const char *program, const u32 version, const int family, const unsigned short protocol, const unsigned short port, int error ), TP_ARGS(program, version, family, protocol, port, error), TP_STRUCT__entry( __field(u32, version) __field(unsigned long, family) __field(unsigned short, protocol) __field(unsigned short, port) __field(int, error) __string(program, program) ), TP_fast_assign( __entry->version = version; __entry->family = family; __entry->protocol = protocol; __entry->port = port; __entry->error = error; __assign_str(program); ), TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d", __get_str(program), __entry->version, __entry->protocol == IPPROTO_UDP ? "udp" : "tcp", __entry->port, rpc_show_address_family(__entry->family), __entry->error ) ); #define DEFINE_REGISTER_EVENT(name) \ DEFINE_EVENT(register_class, svc_##name, \ TP_PROTO( \ const char *program, \ const u32 version, \ const int family, \ const unsigned short protocol, \ const unsigned short port, \ int error \ ), \ TP_ARGS(program, version, family, protocol, \ port, error)) DEFINE_REGISTER_EVENT(register); DEFINE_REGISTER_EVENT(noregister); TRACE_EVENT(svc_unregister, TP_PROTO( const char *program, const u32 version, int error ), TP_ARGS(program, version, error), TP_STRUCT__entry( __field(u32, version) __field(int, error) __string(program, program) ), TP_fast_assign( __entry->version = version; __entry->error = error; __assign_str(program); ), TP_printk("program=%sv%u error=%d", __get_str(program), __entry->version, __entry->error ) ); #endif /* _TRACE_SUNRPC_H */ #include <trace/define_trace.h>
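The EM()/EMe() construction used in the tracepoint header above is a plain X-macro idiom: one list macro is expanded twice, first to register every enumerator with TRACE_DEFINE_ENUM(), then to build the { value, "string" } pairs that __print_symbolic() consumes. The following is a minimal, self-contained userspace sketch of that idiom, not kernel code; all of the DEMO_* and demo_* names are hypothetical and chosen only for illustration.

#include <stdio.h>

enum { DEMO_IDLE, DEMO_RUNNING, DEMO_DONE };

#define DEMO_STATE_LIST \
	EM(DEMO_IDLE,		"IDLE")		\
	EM(DEMO_RUNNING,	"RUNNING")	\
	EMe(DEMO_DONE,		"DONE")

/* First expansion: one statement per value (the kernel emits TRACE_DEFINE_ENUM() here). */
#undef EM
#undef EMe
#define EM(a, b)	printf("enum %-12s = %d\n", #a, a);
#define EMe(a, b)	printf("enum %-12s = %d\n", #a, a);

static void demo_define_enums(void)
{
	DEMO_STATE_LIST
}

/* Second expansion: { value, string } pairs (fed to __print_symbolic() in the kernel). */
#undef EM
#undef EMe
#define EM(a, b)	{ a, b },
#define EMe(a, b)	{ a, b }

static const struct {
	int val;
	const char *name;
} demo_state_names[] = {
	DEMO_STATE_LIST
};

static const char *show_demo_state(int state)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_state_names) / sizeof(demo_state_names[0]); i++)
		if (demo_state_names[i].val == state)
			return demo_state_names[i].name;
	return "UNKNOWN";
}

int main(void)
{
	demo_define_enums();
	printf("state %d prints as %s\n", DEMO_RUNNING, show_demo_state(DEMO_RUNNING));
	return 0;
}

Expanding the same list in both places keeps the enum registrations and the pretty-printing table from drifting apart, which is the point of the double expansion in the header above.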
/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __SOUND_PCM_PARAMS_H #define __SOUND_PCM_PARAMS_H /* * PCM params helpers * Copyright (c) by Abramo Bagnara <abramo@alsa-project.org> */ #include <sound/pcm.h> int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir); int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir); int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir); #define SNDRV_MASK_BITS 64 /* we use so far 64bits only */ #define SNDRV_MASK_SIZE (SNDRV_MASK_BITS / 32) #define MASK_OFS(i) ((i) >> 5) #define MASK_BIT(i) (1U << ((i) & 31)) static inline void snd_mask_none(struct snd_mask *mask) { memset(mask, 0, sizeof(*mask)); } static inline void snd_mask_any(struct snd_mask *mask) { memset(mask, 0xff, SNDRV_MASK_SIZE * sizeof(u_int32_t)); } static inline int snd_mask_empty(const struct snd_mask *mask) { int i; for (i = 0; i < SNDRV_MASK_SIZE; i++) if (mask->bits[i]) return 0; return 1; } static inline unsigned int snd_mask_min(const struct snd_mask *mask) { int i; for (i = 0; i < SNDRV_MASK_SIZE; i++) { if (mask->bits[i]) return __ffs(mask->bits[i]) + (i << 5); } return 0; } static inline unsigned int snd_mask_max(const struct snd_mask *mask) { int i; for (i = SNDRV_MASK_SIZE - 1; i >= 0; i--) { if (mask->bits[i]) return __fls(mask->bits[i]) + (i << 5); } return 0; } static inline void snd_mask_set(struct snd_mask *mask, unsigned int val) { mask->bits[MASK_OFS(val)] |= MASK_BIT(val); } /* Most of drivers need only this one */ static inline void snd_mask_set_format(struct snd_mask *mask, snd_pcm_format_t format) { snd_mask_set(mask, (__force unsigned int)format); } static inline void snd_mask_reset(struct snd_mask *mask, unsigned int val) { mask->bits[MASK_OFS(val)] &= ~MASK_BIT(val); } static inline void snd_mask_set_range(struct snd_mask *mask, unsigned int from, unsigned int to) { unsigned int
i; for (i = from; i <= to; i++) mask->bits[MASK_OFS(i)] |= MASK_BIT(i); } static inline void snd_mask_reset_range(struct snd_mask *mask, unsigned int from, unsigned int to) { unsigned int i; for (i = from; i <= to; i++) mask->bits[MASK_OFS(i)] &= ~MASK_BIT(i); } static inline void snd_mask_leave(struct snd_mask *mask, unsigned int val) { unsigned int v; v = mask->bits[MASK_OFS(val)] & MASK_BIT(val); snd_mask_none(mask); mask->bits[MASK_OFS(val)] = v; } static inline void snd_mask_intersect(struct snd_mask *mask, const struct snd_mask *v) { int i; for (i = 0; i < SNDRV_MASK_SIZE; i++) mask->bits[i] &= v->bits[i]; } static inline int snd_mask_eq(const struct snd_mask *mask, const struct snd_mask *v) { return ! memcmp(mask, v, SNDRV_MASK_SIZE * sizeof(u_int32_t)); } static inline void snd_mask_copy(struct snd_mask *mask, const struct snd_mask *v) { *mask = *v; } static inline int snd_mask_test(const struct snd_mask *mask, unsigned int val) { return mask->bits[MASK_OFS(val)] & MASK_BIT(val); } /* Most of drivers need only this one */ static inline int snd_mask_test_format(const struct snd_mask *mask, snd_pcm_format_t format) { return snd_mask_test(mask, (__force unsigned int)format); } static inline int snd_mask_single(const struct snd_mask *mask) { int i, c = 0; for (i = 0; i < SNDRV_MASK_SIZE; i++) { if (! mask->bits[i]) continue; if (mask->bits[i] & (mask->bits[i] - 1)) return 0; if (c) return 0; c++; } return 1; } static inline int snd_mask_refine(struct snd_mask *mask, const struct snd_mask *v) { struct snd_mask old; snd_mask_copy(&old, mask); snd_mask_intersect(mask, v); if (snd_mask_empty(mask)) return -EINVAL; return !snd_mask_eq(mask, &old); } static inline int snd_mask_refine_first(struct snd_mask *mask) { if (snd_mask_single(mask)) return 0; snd_mask_leave(mask, snd_mask_min(mask)); return 1; } static inline int snd_mask_refine_last(struct snd_mask *mask) { if (snd_mask_single(mask)) return 0; snd_mask_leave(mask, snd_mask_max(mask)); return 1; } static inline int snd_mask_refine_min(struct snd_mask *mask, unsigned int val) { if (snd_mask_min(mask) >= val) return 0; snd_mask_reset_range(mask, 0, val - 1); if (snd_mask_empty(mask)) return -EINVAL; return 1; } static inline int snd_mask_refine_max(struct snd_mask *mask, unsigned int val) { if (snd_mask_max(mask) <= val) return 0; snd_mask_reset_range(mask, val + 1, SNDRV_MASK_BITS); if (snd_mask_empty(mask)) return -EINVAL; return 1; } static inline int snd_mask_refine_set(struct snd_mask *mask, unsigned int val) { int changed; changed = !snd_mask_single(mask); snd_mask_leave(mask, val); if (snd_mask_empty(mask)) return -EINVAL; return changed; } static inline int snd_mask_value(const struct snd_mask *mask) { return snd_mask_min(mask); } static inline void snd_interval_any(struct snd_interval *i) { i->min = 0; i->openmin = 0; i->max = UINT_MAX; i->openmax = 0; i->integer = 0; i->empty = 0; } static inline void snd_interval_none(struct snd_interval *i) { i->empty = 1; } static inline int snd_interval_checkempty(const struct snd_interval *i) { return (i->min > i->max || (i->min == i->max && (i->openmin || i->openmax))); } static inline int snd_interval_empty(const struct snd_interval *i) { return i->empty; } static inline int snd_interval_single(const struct snd_interval *i) { return (i->min == i->max || (i->min + 1 == i->max && (i->openmin || i->openmax))); } static inline int snd_interval_value(const struct snd_interval *i) { if (i->openmin && !i->openmax) return i->max; return i->min; } static inline int snd_interval_min(const struct 
snd_interval *i) { return i->min; } static inline int snd_interval_max(const struct snd_interval *i) { unsigned int v; v = i->max; if (i->openmax) v--; return v; } static inline int snd_interval_test(const struct snd_interval *i, unsigned int val) { return !((i->min > val || (i->min == val && i->openmin) || i->max < val || (i->max == val && i->openmax))); } static inline void snd_interval_copy(struct snd_interval *d, const struct snd_interval *s) { *d = *s; } static inline int snd_interval_setinteger(struct snd_interval *i) { if (i->integer) return 0; if (i->openmin && i->openmax && i->min == i->max) return -EINVAL; i->integer = 1; return 1; } static inline int snd_interval_eq(const struct snd_interval *i1, const struct snd_interval *i2) { if (i1->empty) return i2->empty; if (i2->empty) return i1->empty; return i1->min == i2->min && i1->openmin == i2->openmin && i1->max == i2->max && i1->openmax == i2->openmax; } /** * params_access - get the access type from the hw params * @p: hw params */ static inline snd_pcm_access_t params_access(const struct snd_pcm_hw_params *p) { return (__force snd_pcm_access_t)snd_mask_min(hw_param_mask_c(p, SNDRV_PCM_HW_PARAM_ACCESS)); } /** * params_format - get the sample format from the hw params * @p: hw params */ static inline snd_pcm_format_t params_format(const struct snd_pcm_hw_params *p) { return (__force snd_pcm_format_t)snd_mask_min(hw_param_mask_c(p, SNDRV_PCM_HW_PARAM_FORMAT)); } /** * params_subformat - get the sample subformat from the hw params * @p: hw params */ static inline snd_pcm_subformat_t params_subformat(const struct snd_pcm_hw_params *p) { return (__force snd_pcm_subformat_t)snd_mask_min(hw_param_mask_c(p, SNDRV_PCM_HW_PARAM_SUBFORMAT)); } /** * params_period_bytes - get the period size (in bytes) from the hw params * @p: hw params */ static inline unsigned int params_period_bytes(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_BYTES)->min; } /** * params_width - get the number of bits of the sample format from the hw params * @p: hw params * * This function returns the number of bits per sample that the selected sample * format of the hw params has. */ static inline int params_width(const struct snd_pcm_hw_params *p) { return snd_pcm_format_width(params_format(p)); } /* * params_physical_width - get the storage size of the sample format from the hw params * @p: hw params * * This functions returns the number of bits per sample that the selected sample * format of the hw params takes up in memory. This will be equal or larger than * params_width(). */ static inline int params_physical_width(const struct snd_pcm_hw_params *p) { return snd_pcm_format_physical_width(params_format(p)); } int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p); static inline void params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt) { snd_mask_set_format(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT), fmt); } #endif /* __SOUND_PCM_PARAMS_H */
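As a usage note for the accessors above: a PCM driver normally reads the negotiated sample format in its hw_params callback. The sketch below is illustrative only; chip_set_sample_format() and the constraint it enforces are hypothetical, while the params_*() helpers come from this header.

#include <sound/pcm.h>
#include <sound/pcm_params.h>

/* Hypothetical hardware-programming step (placeholder only). */
static int chip_set_sample_format(struct snd_pcm_substream *substream,
				  snd_pcm_format_t fmt)
{
	return 0;
}

static int chip_pcm_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params)
{
	int width = params_width(params);		/* valid bits per sample */
	int phys = params_physical_width(params);	/* bits occupied in memory */

	/* e.g. a converter that takes 16-bit samples or 24-bit samples in 32-bit slots */
	if (!(width == 16 && phys == 16) && !(width == 24 && phys == 32))
		return -EINVAL;

	return chip_set_sample_format(substream, params_format(params));
}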
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __X86_KERNEL_FPU_CONTEXT_H
#define __X86_KERNEL_FPU_CONTEXT_H

#include <asm/fpu/xstate.h>
#include <asm/trace/fpu.h>

/* Functions related to FPU context tracking */

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Invalidate a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}

/* Internal helper for switch_fpu_return() and signal frame setup */
static inline void fpregs_restore_userregs(void)
{
	struct fpu *fpu = x86_task_fpu(current);
	int cpu = smp_processor_id();

	if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER)))
		return;

	if (!fpregs_state_valid(fpu, cpu)) {
		/*
		 * This restores _all_ xstate which has not been
		 * established yet.
		 *
		 * If PKRU is enabled, then the PKRU value is already
		 * correct because it was either set in switch_to() or in
		 * flush_thread(). So it is excluded because it might be
		 * not up to date in current->thread.fpu->xsave state.
		 *
		 * XFD state is handled in restore_fpregs_from_fpstate().
		 */
		restore_fpregs_from_fpstate(fpu->fpstate, XFEATURE_MASK_FPSTATE);
		fpregs_activate(fpu);
		fpu->last_cpu = cpu;
	}
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}

#endif
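For readers outside arch/x86, the two-part validity rule described in the header comment above can be shown with a small standalone model. The following is plain userspace C, not kernel code, and every name in it is invented to mirror fpu_fpregs_owner_ctx and fpu->last_cpu.

/*
 * Standalone illustration of the check in fpregs_state_valid(): the per-CPU
 * owner pointer must still name this context AND the context must have last
 * been loaded on this CPU. Not kernel code; names are invented.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 2

struct ctx { int last_cpu; };

static struct ctx *owner[NR_CPUS];	/* models fpu_fpregs_owner_ctx */

static bool state_valid(struct ctx *c, int cpu)
{
	return owner[cpu] == c && c->last_cpu == cpu;
}

static void load_on(struct ctx *c, int cpu)	/* models fpregs_activate() */
{
	owner[cpu] = c;
	c->last_cpu = cpu;
}

int main(void)
{
	struct ctx a = { .last_cpu = -1 }, b = { .last_cpu = -1 };

	load_on(&a, 0);
	printf("%d\n", state_valid(&a, 0));	/* 1: cpu0 registers still hold a */
	load_on(&b, 0);
	printf("%d\n", state_valid(&a, 0));	/* 0: cpu0 is now owned by b */
	load_on(&a, 1);
	printf("%d\n", state_valid(&a, 0));	/* 0: a was last loaded on cpu1 */
	return 0;
}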
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * V4L2 controls framework Request API implementation.
 *
 * Copyright (C) 2018-2021 Hans Verkuil <hverkuil@kernel.org>
 */

#define pr_fmt(fmt) "v4l2-ctrls: " fmt

#include <linux/export.h>
#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

#include "v4l2-ctrls-priv.h"

/* Initialize the request-related fields in a control handler */
void v4l2_ctrl_handler_init_request(struct v4l2_ctrl_handler *hdl)
{
	INIT_LIST_HEAD(&hdl->requests);
	INIT_LIST_HEAD(&hdl->requests_queued);
	hdl->request_is_queued = false;
	media_request_object_init(&hdl->req_obj);
}

/* Free the request-related fields in a control handler */
void v4l2_ctrl_handler_free_request(struct v4l2_ctrl_handler *hdl)
{
	struct v4l2_ctrl_handler *req, *next_req;

	/*
	 * Do nothing if this isn't the main handler or the main
	 * handler is not used in any request.
	 *
	 * The main handler can be identified by having a NULL ops pointer in
	 * the request object.
	 */
	if (hdl->req_obj.ops || list_empty(&hdl->requests))
		return;

	/*
	 * If the main handler is freed and it is used by handler objects in
	 * outstanding requests, then unbind and put those objects before
	 * freeing the main handler.
*/ list_for_each_entry_safe(req, next_req, &hdl->requests, requests) { media_request_object_unbind(&req->req_obj); media_request_object_put(&req->req_obj); } } static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_handler *from) { struct v4l2_ctrl_ref *ref; int err = 0; if (WARN_ON(!hdl || hdl == from)) return -EINVAL; if (hdl->error) return hdl->error; WARN_ON(hdl->lock != &hdl->_lock); mutex_lock(from->lock); list_for_each_entry(ref, &from->ctrl_refs, node) { struct v4l2_ctrl *ctrl = ref->ctrl; struct v4l2_ctrl_ref *new_ref; /* Skip refs inherited from other devices */ if (ref->from_other_dev) continue; err = handler_new_ref(hdl, ctrl, &new_ref, false, true); if (err) break; } mutex_unlock(from->lock); return err; } static void v4l2_ctrl_request_queue(struct media_request_object *obj) { struct v4l2_ctrl_handler *hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); struct v4l2_ctrl_handler *main_hdl = obj->priv; mutex_lock(main_hdl->lock); list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued); hdl->request_is_queued = true; mutex_unlock(main_hdl->lock); } static void v4l2_ctrl_request_unbind(struct media_request_object *obj) { struct v4l2_ctrl_handler *hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); struct v4l2_ctrl_handler *main_hdl = obj->priv; mutex_lock(main_hdl->lock); list_del_init(&hdl->requests); if (hdl->request_is_queued) { list_del_init(&hdl->requests_queued); hdl->request_is_queued = false; } mutex_unlock(main_hdl->lock); } static void v4l2_ctrl_request_release(struct media_request_object *obj) { struct v4l2_ctrl_handler *hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); v4l2_ctrl_handler_free(hdl); kfree(hdl); } static const struct media_request_object_ops req_ops = { .queue = v4l2_ctrl_request_queue, .unbind = v4l2_ctrl_request_unbind, .release = v4l2_ctrl_request_release, }; struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req, struct v4l2_ctrl_handler *parent) { struct media_request_object *obj; if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING && req->state != MEDIA_REQUEST_STATE_QUEUED)) return NULL; obj = media_request_object_find(req, &req_ops, parent); if (obj) return container_of(obj, struct v4l2_ctrl_handler, req_obj); return NULL; } EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find); struct v4l2_ctrl * v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) { struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); return (ref && ref->p_req_valid) ? 
ref->ctrl : NULL; } EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find); static int v4l2_ctrl_request_bind(struct media_request *req, struct v4l2_ctrl_handler *hdl, struct v4l2_ctrl_handler *from) { int ret; ret = v4l2_ctrl_request_clone(hdl, from); if (!ret) { ret = media_request_object_bind(req, &req_ops, from, false, &hdl->req_obj); if (!ret) { mutex_lock(from->lock); list_add_tail(&hdl->requests, &from->requests); mutex_unlock(from->lock); } } return ret; } static struct media_request_object * v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl, struct media_request *req, bool set) { struct media_request_object *obj; struct v4l2_ctrl_handler *new_hdl; int ret; if (IS_ERR(req)) return ERR_CAST(req); if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING)) return ERR_PTR(-EBUSY); obj = media_request_object_find(req, &req_ops, hdl); if (obj) return obj; /* * If there are no controls in this completed request, * then that can only happen if: * * 1) no controls were present in the queued request, and * 2) v4l2_ctrl_request_complete() could not allocate a * control handler object to store the completed state in. * * So return ENOMEM to indicate that there was an out-of-memory * error. */ if (!set) return ERR_PTR(-ENOMEM); new_hdl = kzalloc_obj(*new_hdl); if (!new_hdl) return ERR_PTR(-ENOMEM); obj = &new_hdl->req_obj; ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8); if (!ret) ret = v4l2_ctrl_request_bind(req, new_hdl, hdl); if (ret) { v4l2_ctrl_handler_free(new_hdl); kfree(new_hdl); return ERR_PTR(ret); } media_request_object_get(obj); return obj; } int v4l2_g_ext_ctrls_request(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs) { struct media_request_object *obj = NULL; struct media_request *req = NULL; int ret; if (!mdev || cs->request_fd < 0) return -EINVAL; req = media_request_get_by_fd(mdev, cs->request_fd); if (IS_ERR(req)) return PTR_ERR(req); if (req->state != MEDIA_REQUEST_STATE_COMPLETE) { media_request_put(req); return -EACCES; } ret = media_request_lock_for_access(req); if (ret) { media_request_put(req); return ret; } obj = v4l2_ctrls_find_req_obj(hdl, req, false); if (IS_ERR(obj)) { media_request_unlock_for_access(req); media_request_put(req); return PTR_ERR(obj); } hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); ret = v4l2_g_ext_ctrls_common(hdl, cs, vdev); media_request_unlock_for_access(req); media_request_object_put(obj); media_request_put(req); return ret; } int try_set_ext_ctrls_request(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs, bool set) { struct media_request_object *obj = NULL; struct media_request *req = NULL; int ret; if (!mdev) { dprintk(vdev, "%s: missing media device\n", video_device_node_name(vdev)); return -EINVAL; } if (cs->request_fd < 0) { dprintk(vdev, "%s: invalid request fd %d\n", video_device_node_name(vdev), cs->request_fd); return -EINVAL; } req = media_request_get_by_fd(mdev, cs->request_fd); if (IS_ERR(req)) { dprintk(vdev, "%s: cannot find request fd %d\n", video_device_node_name(vdev), cs->request_fd); return PTR_ERR(req); } ret = media_request_lock_for_update(req); if (ret) { dprintk(vdev, "%s: cannot lock request fd %d\n", video_device_node_name(vdev), cs->request_fd); media_request_put(req); return ret; } obj = v4l2_ctrls_find_req_obj(hdl, req, set); if (IS_ERR(obj)) { dprintk(vdev, "%s: cannot find request object for request fd %d\n", 
video_device_node_name(vdev), cs->request_fd); media_request_unlock_for_update(req); media_request_put(req); return PTR_ERR(obj); } hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); if (ret) dprintk(vdev, "%s: try_set_ext_ctrls_common failed (%d)\n", video_device_node_name(vdev), ret); media_request_unlock_for_update(req); media_request_object_put(obj); media_request_put(req); return ret; } void v4l2_ctrl_request_complete(struct media_request *req, struct v4l2_ctrl_handler *main_hdl) { struct media_request_object *obj; struct v4l2_ctrl_handler *hdl; struct v4l2_ctrl_ref *ref; if (!req || !main_hdl) return; /* * Note that it is valid if nothing was found. It means * that this request doesn't have any controls and so just * wants to leave the controls unchanged. */ obj = media_request_object_find(req, &req_ops, main_hdl); if (!obj) { int ret; /* Create a new request so the driver can return controls */ hdl = kzalloc_obj(*hdl); if (!hdl) return; ret = v4l2_ctrl_handler_init(hdl, (main_hdl->nr_of_buckets - 1) * 8); if (!ret) ret = v4l2_ctrl_request_bind(req, hdl, main_hdl); if (ret) { v4l2_ctrl_handler_free(hdl); kfree(hdl); return; } hdl->request_is_queued = true; obj = media_request_object_find(req, &req_ops, main_hdl); } hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); list_for_each_entry(ref, &hdl->ctrl_refs, node) { struct v4l2_ctrl *ctrl = ref->ctrl; struct v4l2_ctrl *master = ctrl->cluster[0]; unsigned int i; if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { v4l2_ctrl_lock(master); /* g_volatile_ctrl will update the current control values */ for (i = 0; i < master->ncontrols; i++) cur_to_new(master->cluster[i]); call_op(master, g_volatile_ctrl); new_to_req(ref); v4l2_ctrl_unlock(master); continue; } if (ref->p_req_valid) continue; /* Copy the current control value into the request */ v4l2_ctrl_lock(ctrl); cur_to_req(ref); v4l2_ctrl_unlock(ctrl); } mutex_lock(main_hdl->lock); WARN_ON(!hdl->request_is_queued); list_del_init(&hdl->requests_queued); hdl->request_is_queued = false; mutex_unlock(main_hdl->lock); media_request_object_complete(obj); media_request_object_put(obj); } EXPORT_SYMBOL(v4l2_ctrl_request_complete); int v4l2_ctrl_request_setup(struct media_request *req, struct v4l2_ctrl_handler *main_hdl) { struct media_request_object *obj; struct v4l2_ctrl_handler *hdl; struct v4l2_ctrl_ref *ref; int ret = 0; if (!req || !main_hdl) return 0; if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) return -EBUSY; /* * Note that it is valid if nothing was found. It means * that this request doesn't have any controls and so just * wants to leave the controls unchanged. */ obj = media_request_object_find(req, &req_ops, main_hdl); if (!obj) return 0; if (obj->completed) { media_request_object_put(obj); return -EBUSY; } hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj); list_for_each_entry(ref, &hdl->ctrl_refs, node) ref->req_done = false; list_for_each_entry(ref, &hdl->ctrl_refs, node) { struct v4l2_ctrl *ctrl = ref->ctrl; struct v4l2_ctrl *master = ctrl->cluster[0]; bool have_new_data = false; int i; /* * Skip if this control was already handled by a cluster. * Skip button controls and read-only controls. 
*/ if (ref->req_done || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) continue; v4l2_ctrl_lock(master); for (i = 0; i < master->ncontrols; i++) { if (master->cluster[i]) { struct v4l2_ctrl_ref *r = find_ref(hdl, master->cluster[i]->id); if (r->p_req_valid) { have_new_data = true; break; } } } if (!have_new_data) { v4l2_ctrl_unlock(master); continue; } for (i = 0; i < master->ncontrols; i++) { if (master->cluster[i]) { struct v4l2_ctrl_ref *r = find_ref(hdl, master->cluster[i]->id); ret = req_to_new(r); if (ret) { v4l2_ctrl_unlock(master); goto error; } master->cluster[i]->is_new = 1; r->req_done = true; } } /* * For volatile autoclusters that are currently in auto mode * we need to discover if it will be set to manual mode. * If so, then we have to copy the current volatile values * first since those will become the new manual values (which * may be overwritten by explicit new values from this set * of controls). */ if (master->is_auto && master->has_volatiles && !is_cur_manual(master)) { s32 new_auto_val = *master->p_new.p_s32; /* * If the new value == the manual value, then copy * the current volatile values. */ if (new_auto_val == master->manual_mode_value) update_from_auto_cluster(master); } ret = try_or_set_cluster(NULL, master, true, 0); v4l2_ctrl_unlock(master); if (ret) break; } error: media_request_object_put(obj); return ret; } EXPORT_SYMBOL(v4l2_ctrl_request_setup);
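The two exported entry points above, v4l2_ctrl_request_setup() and v4l2_ctrl_request_complete(), are what a stateless (mem2mem) codec driver calls around a hardware job. Below is a hedged sketch of that calling pattern; struct my_ctx and my_device_run() are hypothetical, and a real driver would also manage buffers and error paths.

/*
 * Sketch only: driving the request API implemented above from an m2m
 * driver's job handler. The my_* names are invented for illustration.
 */
#include <media/media-request.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

struct my_ctx {
	struct v4l2_fh fh;
	struct v4l2_ctrl_handler hdl;
};

static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;
	struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	struct media_request *req = src->vb2_buf.req_obj.req;

	/* Apply the control values stored in this request to the handler */
	v4l2_ctrl_request_setup(req, &ctx->hdl);

	/* ... read the now-current control values and program the hardware ... */

	/* Copy the final control values back into the request and complete it */
	v4l2_ctrl_request_complete(req, &ctx->hdl);
}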
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/sched/signal.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/in.h> #include <linux/sysfs.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/rtnetlink.h> #include <linux/etherdevice.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/nsproxy.h> #include <net/bonding.h> #define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd)))) /* "show" function for the bond_masters attribute. * The class parameter is ignored. */ static ssize_t bonding_show_bonds(const struct class *cls, const struct class_attribute *attr, char *buf) { const struct bond_net *bn = container_of_const(attr, struct bond_net, class_attr_bonding_masters); struct bonding *bond; int res = 0; rcu_read_lock(); list_for_each_entry_rcu(bond, &bn->dev_list, bond_list) { if (res > (PAGE_SIZE - IFNAMSIZ)) { /* not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) res = PAGE_SIZE - 10; res += sysfs_emit_at(buf, res, "++more++ "); break; } res += sysfs_emit_at(buf, res, "%s ", bond->dev->name); } if (res) buf[res-1] = '\n'; /* eat the leftover space */ rcu_read_unlock(); return res; } static struct net_device *bond_get_by_name(const struct bond_net *bn, const char *ifname) { struct bonding *bond; list_for_each_entry(bond, &bn->dev_list, bond_list) { if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0) return bond->dev; } return NULL; } /* "store" function for the bond_masters attribute. This is what * creates and deletes entire bonds. * * The class parameter is ignored. */ static ssize_t bonding_store_bonds(const struct class *cls, const struct class_attribute *attr, const char *buffer, size_t count) { const struct bond_net *bn = container_of_const(attr, struct bond_net, class_attr_bonding_masters); char command[IFNAMSIZ + 1] = {0, }; char *ifname; int rv, res = count; sscanf(buffer, "%16s", command); /* IFNAMSIZ*/ ifname = command + 1; if ((strlen(command) <= 1) || !dev_valid_name(ifname)) goto err_no_cmd; if (command[0] == '+') { pr_info("%s is being created...\n", ifname); rv = bond_create(bn->net, ifname); if (rv) { if (rv == -EEXIST) pr_info("%s already exists\n", ifname); else pr_info("%s creation failed\n", ifname); res = rv; } } else if (command[0] == '-') { struct net_device *bond_dev; rtnl_lock(); bond_dev = bond_get_by_name(bn, ifname); if (bond_dev) { pr_info("%s is being deleted...\n", ifname); unregister_netdevice(bond_dev); } else { pr_err("unable to delete non-existent %s\n", ifname); res = -ENODEV; } rtnl_unlock(); } else goto err_no_cmd; /* Always return either count or an error. If you return 0, you'll * get called forever, which is bad. */ return res; err_no_cmd: pr_err("no command found in bonding_masters - use +ifname or -ifname\n"); return -EPERM; } /* class attribute for bond_masters file. 
This ends up in /sys/class/net */ static const struct class_attribute class_attr_bonding_masters = { .attr = { .name = "bonding_masters", .mode = 0644, }, .show = bonding_show_bonds, .store = bonding_store_bonds, }; /* Generic "store" method for bonding sysfs option setting */ static ssize_t bonding_sysfs_store_option(struct device *d, struct device_attribute *attr, const char *buffer, size_t count) { struct bonding *bond = to_bond(d); const struct bond_option *opt; char *buffer_clone; int ret; opt = bond_opt_get_by_name(attr->attr.name); if (WARN_ON(!opt)) return -ENOENT; buffer_clone = kstrndup(buffer, count, GFP_KERNEL); if (!buffer_clone) return -ENOMEM; ret = bond_opt_tryset_rtnl(bond, opt->id, buffer_clone); if (!ret) ret = count; kfree(buffer_clone); return ret; } /* Show the slaves in the current bond. */ static ssize_t bonding_show_slaves(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); struct list_head *iter; struct slave *slave; int res = 0; rcu_read_lock(); bond_for_each_slave_rcu(bond, slave, iter) { if (res > (PAGE_SIZE - IFNAMSIZ)) { /* not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) res = PAGE_SIZE - 10; res += sysfs_emit_at(buf, res, "++more++ "); break; } res += sysfs_emit_at(buf, res, "%s ", slave->dev->name); } rcu_read_unlock(); if (res) buf[res-1] = '\n'; /* eat the leftover space */ return res; } static DEVICE_ATTR(slaves, 0644, bonding_show_slaves, bonding_sysfs_store_option); /* Show the bonding mode. */ static ssize_t bonding_show_mode(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond)); return sysfs_emit(buf, "%s %d\n", val->string, BOND_MODE(bond)); } static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option); /* Show the bonding transmit hash method. */ static ssize_t bonding_show_xmit_hash(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy); return sysfs_emit(buf, "%s %d\n", val->string, bond->params.xmit_policy); } static DEVICE_ATTR(xmit_hash_policy, 0644, bonding_show_xmit_hash, bonding_sysfs_store_option); /* Show arp_validate. */ static ssize_t bonding_show_arp_validate(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE, bond->params.arp_validate); return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_validate); } static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate, bonding_sysfs_store_option); /* Show arp_all_targets. */ static ssize_t bonding_show_arp_all_targets(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS, bond->params.arp_all_targets); return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_all_targets); } static DEVICE_ATTR(arp_all_targets, 0644, bonding_show_arp_all_targets, bonding_sysfs_store_option); /* Show fail_over_mac. 
*/ static ssize_t bonding_show_fail_over_mac(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC, bond->params.fail_over_mac); return sysfs_emit(buf, "%s %d\n", val->string, bond->params.fail_over_mac); } static DEVICE_ATTR(fail_over_mac, 0644, bonding_show_fail_over_mac, bonding_sysfs_store_option); /* Show the arp timer interval. */ static ssize_t bonding_show_arp_interval(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.arp_interval); } static DEVICE_ATTR(arp_interval, 0644, bonding_show_arp_interval, bonding_sysfs_store_option); /* Show the arp targets. */ static ssize_t bonding_show_arp_targets(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); int i, res = 0; for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) { if (bond->params.arp_targets[i]) res += sysfs_emit_at(buf, res, "%pI4 ", &bond->params.arp_targets[i]); } if (res) buf[res-1] = '\n'; /* eat the leftover space */ return res; } static DEVICE_ATTR(arp_ip_target, 0644, bonding_show_arp_targets, bonding_sysfs_store_option); /* Show the arp missed max. */ static ssize_t bonding_show_missed_max(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%u\n", bond->params.missed_max); } static DEVICE_ATTR(arp_missed_max, 0644, bonding_show_missed_max, bonding_sysfs_store_option); /* Show the up and down delays. */ static ssize_t bonding_show_downdelay(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.downdelay * bond->params.miimon); } static DEVICE_ATTR(downdelay, 0644, bonding_show_downdelay, bonding_sysfs_store_option); static ssize_t bonding_show_updelay(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.updelay * bond->params.miimon); } static DEVICE_ATTR(updelay, 0644, bonding_show_updelay, bonding_sysfs_store_option); static ssize_t bonding_show_peer_notif_delay(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.peer_notif_delay * bond->params.miimon); } static DEVICE_ATTR(peer_notif_delay, 0644, bonding_show_peer_notif_delay, bonding_sysfs_store_option); /* Show the LACP activity and interval. 
*/ static ssize_t bonding_show_lacp_active(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active); return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_active); } static DEVICE_ATTR(lacp_active, 0644, bonding_show_lacp_active, bonding_sysfs_store_option); static ssize_t bonding_show_lacp_rate(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast); return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_fast); } static DEVICE_ATTR(lacp_rate, 0644, bonding_show_lacp_rate, bonding_sysfs_store_option); static ssize_t bonding_show_min_links(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%u\n", bond->params.min_links); } static DEVICE_ATTR(min_links, 0644, bonding_show_min_links, bonding_sysfs_store_option); static ssize_t bonding_show_ad_select(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select); return sysfs_emit(buf, "%s %d\n", val->string, bond->params.ad_select); } static DEVICE_ATTR(ad_select, 0644, bonding_show_ad_select, bonding_sysfs_store_option); /* Show the number of peer notifications to send after a failover event. */ static ssize_t bonding_show_num_peer_notif(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.num_peer_notif); } static DEVICE_ATTR(num_grat_arp, 0644, bonding_show_num_peer_notif, bonding_sysfs_store_option); static DEVICE_ATTR(num_unsol_na, 0644, bonding_show_num_peer_notif, bonding_sysfs_store_option); /* Show the MII monitor interval. */ static ssize_t bonding_show_miimon(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.miimon); } static DEVICE_ATTR(miimon, 0644, bonding_show_miimon, bonding_sysfs_store_option); /* Show the primary slave. */ static ssize_t bonding_show_primary(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); struct slave *primary; int count = 0; rcu_read_lock(); primary = rcu_dereference(bond->primary_slave); if (primary) count = sysfs_emit(buf, "%s\n", primary->dev->name); rcu_read_unlock(); return count; } static DEVICE_ATTR(primary, 0644, bonding_show_primary, bonding_sysfs_store_option); /* Show the primary_reselect flag. */ static ssize_t bonding_show_primary_reselect(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); const struct bond_opt_value *val; val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT, bond->params.primary_reselect); return sysfs_emit(buf, "%s %d\n", val->string, bond->params.primary_reselect); } static DEVICE_ATTR(primary_reselect, 0644, bonding_show_primary_reselect, bonding_sysfs_store_option); /* use_carrier is obsolete, but print value for compatibility */ static ssize_t bonding_show_carrier(struct device *d, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "1\n"); } static DEVICE_ATTR(use_carrier, 0644, bonding_show_carrier, bonding_sysfs_store_option); /* Show currently active_slave. 
*/ static ssize_t bonding_show_active_slave(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); struct net_device *slave_dev; int count = 0; rcu_read_lock(); slave_dev = bond_option_active_slave_get_rcu(bond); if (slave_dev) count = sysfs_emit(buf, "%s\n", slave_dev->name); rcu_read_unlock(); return count; } static DEVICE_ATTR(active_slave, 0644, bonding_show_active_slave, bonding_sysfs_store_option); /* Show link status of the bond interface. */ static ssize_t bonding_show_mii_status(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); bool active = netif_carrier_ok(bond->dev); return sysfs_emit(buf, "%s\n", active ? "up" : "down"); } static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL); /* Show current 802.3ad aggregator ID. */ static ssize_t bonding_show_ad_aggregator(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (BOND_MODE(bond) == BOND_MODE_8023AD) { struct ad_info ad_info; count = sysfs_emit(buf, "%d\n", bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.aggregator_id); } return count; } static DEVICE_ATTR(ad_aggregator, 0444, bonding_show_ad_aggregator, NULL); /* Show number of active 802.3ad ports. */ static ssize_t bonding_show_ad_num_ports(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (BOND_MODE(bond) == BOND_MODE_8023AD) { struct ad_info ad_info; count = sysfs_emit(buf, "%d\n", bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.ports); } return count; } static DEVICE_ATTR(ad_num_ports, 0444, bonding_show_ad_num_ports, NULL); /* Show current 802.3ad actor key. */ static ssize_t bonding_show_ad_actor_key(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) { struct ad_info ad_info; count = sysfs_emit(buf, "%d\n", bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.actor_key); } return count; } static DEVICE_ATTR(ad_actor_key, 0444, bonding_show_ad_actor_key, NULL); /* Show current 802.3ad partner key. */ static ssize_t bonding_show_ad_partner_key(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) { struct ad_info ad_info; count = sysfs_emit(buf, "%d\n", bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.partner_key); } return count; } static DEVICE_ATTR(ad_partner_key, 0444, bonding_show_ad_partner_key, NULL); /* Show current 802.3ad partner mac. */ static ssize_t bonding_show_ad_partner_mac(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) { struct ad_info ad_info; if (!bond_3ad_get_active_agg_info(bond, &ad_info)) count = sysfs_emit(buf, "%pM\n", ad_info.partner_system); } return count; } static DEVICE_ATTR(ad_partner_mac, 0444, bonding_show_ad_partner_mac, NULL); /* Show the queue_ids of the slaves in the current bond. 
*/ static ssize_t bonding_show_queue_id(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); struct list_head *iter; struct slave *slave; int res = 0; rcu_read_lock(); bond_for_each_slave_rcu(bond, slave, iter) { if (res > (PAGE_SIZE - IFNAMSIZ - 6)) { /* not enough space for another interface_name:queue_id pair */ if ((PAGE_SIZE - res) > 10) res = PAGE_SIZE - 10; res += sysfs_emit_at(buf, res, "++more++ "); break; } res += sysfs_emit_at(buf, res, "%s:%d ", slave->dev->name, READ_ONCE(slave->queue_id)); } if (res) buf[res-1] = '\n'; /* eat the leftover space */ rcu_read_unlock(); return res; } static DEVICE_ATTR(queue_id, 0644, bonding_show_queue_id, bonding_sysfs_store_option); /* Show the all_slaves_active flag. */ static ssize_t bonding_show_slaves_active(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.all_slaves_active); } static DEVICE_ATTR(all_slaves_active, 0644, bonding_show_slaves_active, bonding_sysfs_store_option); /* Show the number of IGMP membership reports to send on link failure */ static ssize_t bonding_show_resend_igmp(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.resend_igmp); } static DEVICE_ATTR(resend_igmp, 0644, bonding_show_resend_igmp, bonding_sysfs_store_option); static ssize_t bonding_show_lp_interval(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.lp_interval); } static DEVICE_ATTR(lp_interval, 0644, bonding_show_lp_interval, bonding_sysfs_store_option); static ssize_t bonding_show_tlb_dynamic_lb(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sysfs_emit(buf, "%d\n", bond->params.tlb_dynamic_lb); } static DEVICE_ATTR(tlb_dynamic_lb, 0644, bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option); static ssize_t bonding_show_packets_per_slave(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); unsigned int packets_per_slave = bond->params.packets_per_slave; return sysfs_emit(buf, "%u\n", packets_per_slave); } static DEVICE_ATTR(packets_per_slave, 0644, bonding_show_packets_per_slave, bonding_sysfs_store_option); static ssize_t bonding_show_ad_actor_sys_prio(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) return sysfs_emit(buf, "%hu\n", bond->params.ad_actor_sys_prio); return 0; } static DEVICE_ATTR(ad_actor_sys_prio, 0644, bonding_show_ad_actor_sys_prio, bonding_sysfs_store_option); static ssize_t bonding_show_ad_actor_system(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) return sysfs_emit(buf, "%pM\n", bond->params.ad_actor_system); return 0; } static DEVICE_ATTR(ad_actor_system, 0644, bonding_show_ad_actor_system, bonding_sysfs_store_option); static ssize_t bonding_show_ad_user_port_key(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) return sysfs_emit(buf, "%hu\n", bond->params.ad_user_port_key); return 0; } static DEVICE_ATTR(ad_user_port_key, 0644, bonding_show_ad_user_port_key, 
		   bonding_sysfs_store_option);

static struct attribute *per_bond_attrs[] = {
	&dev_attr_slaves.attr,
	&dev_attr_mode.attr,
	&dev_attr_fail_over_mac.attr,
	&dev_attr_arp_validate.attr,
	&dev_attr_arp_all_targets.attr,
	&dev_attr_arp_interval.attr,
	&dev_attr_arp_ip_target.attr,
	&dev_attr_downdelay.attr,
	&dev_attr_updelay.attr,
	&dev_attr_peer_notif_delay.attr,
	&dev_attr_lacp_active.attr,
	&dev_attr_lacp_rate.attr,
	&dev_attr_ad_select.attr,
	&dev_attr_xmit_hash_policy.attr,
	&dev_attr_num_grat_arp.attr,
	&dev_attr_num_unsol_na.attr,
	&dev_attr_miimon.attr,
	&dev_attr_primary.attr,
	&dev_attr_primary_reselect.attr,
	&dev_attr_use_carrier.attr,
	&dev_attr_active_slave.attr,
	&dev_attr_mii_status.attr,
	&dev_attr_ad_aggregator.attr,
	&dev_attr_ad_num_ports.attr,
	&dev_attr_ad_actor_key.attr,
	&dev_attr_ad_partner_key.attr,
	&dev_attr_ad_partner_mac.attr,
	&dev_attr_queue_id.attr,
	&dev_attr_all_slaves_active.attr,
	&dev_attr_resend_igmp.attr,
	&dev_attr_min_links.attr,
	&dev_attr_lp_interval.attr,
	&dev_attr_packets_per_slave.attr,
	&dev_attr_tlb_dynamic_lb.attr,
	&dev_attr_ad_actor_sys_prio.attr,
	&dev_attr_ad_actor_system.attr,
	&dev_attr_ad_user_port_key.attr,
	&dev_attr_arp_missed_max.attr,
	NULL,
};

static const struct attribute_group bonding_group = {
	.name = "bonding",
	.attrs = per_bond_attrs,
};

/* Initialize sysfs. This sets up the bonding_masters file in
 * /sys/class/net.
 */
int __net_init bond_create_sysfs(struct bond_net *bn)
{
	int ret;

	bn->class_attr_bonding_masters = class_attr_bonding_masters;
	sysfs_attr_init(&bn->class_attr_bonding_masters.attr);

	ret = netdev_class_create_file_ns(&bn->class_attr_bonding_masters,
					  bn->net);
	/* Permit multiple loads of the module by ignoring failures to
	 * create the bonding_masters sysfs file. Bonding devices
	 * created by second or subsequent loads of the module will
	 * not be listed in, or controllable by, bonding_masters, but
	 * will have the usual "bonding" sysfs directory.
	 *
	 * This is done to preserve backwards compatibility for
	 * initscripts/sysconfig, which load bonding multiple times to
	 * configure multiple bonding devices.
	 */
	if (ret == -EEXIST) {
		/* Is someone being kinky and naming a device bonding_master? */
		if (netdev_name_in_use(bn->net,
				       class_attr_bonding_masters.attr.name))
			pr_err("network device named %s already exists in sysfs\n",
			       class_attr_bonding_masters.attr.name);
		ret = 0;
	}

	return ret;
}

/* Remove /sys/class/net/bonding_masters. */
void __net_exit bond_destroy_sysfs(struct bond_net *bn)
{
	netdev_class_remove_file_ns(&bn->class_attr_bonding_masters, bn->net);
}

/* Initialize sysfs for each bond. This sets up and registers
 * the 'bonding' directory for each individual bond under /sys/class/net.
 */
void bond_prepare_sysfs_group(struct bonding *bond)
{
	bond->dev->sysfs_groups[0] = &bonding_group;
}
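The files registered above make up the classic sysfs control interface for bonding. As an illustration, a minimal userspace program can drive it as follows; the interface name and option values are only examples, and the caller needs CAP_NET_ADMIN with the bonding module loaded.

/*
 * Userspace example (not part of the driver): exercising the sysfs files
 * registered above.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) >= 0)
		ret = 0;
	close(fd);
	return ret;
}

int main(void)
{
	/* "+name" creates a bond, "-name" deletes it (see bonding_store_bonds()) */
	sysfs_write("/sys/class/net/bonding_masters", "+bond0");
	/* per-bond options live in the "bonding" group created above */
	sysfs_write("/sys/class/net/bond0/bonding/miimon", "100");
	sysfs_write("/sys/class/net/bond0/bonding/mode", "active-backup");
	return 0;
}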
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 * Definitions for mount interface. This describes the in-kernel linked list
 * of mounted filesystems.
 *
 * Author: Marco van Wieringen <mvw@planets.elm.net>
 *
 */
#ifndef _LINUX_MOUNT_H
#define _LINUX_MOUNT_H

#include <linux/types.h>
#include <asm/barrier.h>

struct super_block;
struct dentry;
struct user_namespace;
struct mnt_idmap;
struct file_system_type;
struct fs_context;
struct file;
struct path;

enum mount_flags {
	MNT_NOSUID = 0x01,
	MNT_NODEV = 0x02,
	MNT_NOEXEC = 0x04,
	MNT_NOATIME = 0x08,
	MNT_NODIRATIME = 0x10,
	MNT_RELATIME = 0x20,
	MNT_READONLY = 0x40, /* does the user want this to be r/o? */
	MNT_NOSYMFOLLOW = 0x80,
	MNT_SHRINKABLE = 0x100,
	MNT_INTERNAL = 0x4000,
	MNT_LOCK_ATIME = 0x040000,
	MNT_LOCK_NOEXEC = 0x080000,
	MNT_LOCK_NOSUID = 0x100000,
	MNT_LOCK_NODEV = 0x200000,
	MNT_LOCK_READONLY = 0x400000,
	MNT_LOCKED = 0x800000,
	MNT_DOOMED = 0x1000000,
	MNT_SYNC_UMOUNT = 0x2000000,
	MNT_UMOUNT = 0x8000000,
	MNT_USER_SETTABLE_MASK = MNT_NOSUID | MNT_NODEV | MNT_NOEXEC |
				 MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME |
				 MNT_READONLY | MNT_NOSYMFOLLOW,
	MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,
	MNT_INTERNAL_FLAGS = MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT |
			     MNT_LOCKED
};

struct vfsmount {
	struct dentry *mnt_root;	/* root of the mounted tree */
	struct super_block *mnt_sb;	/* pointer to superblock */
	int mnt_flags;
	struct mnt_idmap *mnt_idmap;
} __randomize_layout;

static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt)
{
	/* Pairs with smp_store_release() in do_idmap_mount().
*/ return READ_ONCE(mnt->mnt_idmap); } extern int mnt_want_write(struct vfsmount *mnt); extern int mnt_want_write_file(struct file *file); extern void mnt_drop_write(struct vfsmount *mnt); extern void mnt_drop_write_file(struct file *file); extern void mntput(struct vfsmount *mnt); extern struct vfsmount *mntget(struct vfsmount *mnt); extern void mnt_make_shortterm(struct vfsmount *mnt); extern struct vfsmount *mnt_clone_internal(const struct path *path); extern bool __mnt_is_readonly(const struct vfsmount *mnt); extern bool mnt_may_suid(struct vfsmount *mnt); extern struct vfsmount *clone_private_mount(const struct path *path); int mnt_get_write_access(struct vfsmount *mnt); void mnt_put_write_access(struct vfsmount *mnt); extern struct vfsmount *fc_mount(struct fs_context *fc); extern struct vfsmount *fc_mount_longterm(struct fs_context *fc); extern struct vfsmount *vfs_create_mount(struct fs_context *fc); extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data); extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list); extern void mark_mounts_for_expiry(struct list_head *mounts); extern bool path_is_mountpoint(const struct path *path); extern bool our_mnt(struct vfsmount *mnt); extern struct vfsmount *kern_mount(struct file_system_type *); extern void kern_unmount(struct vfsmount *mnt); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); int do_mount(const char *, const char __user *, const char *, unsigned long, void *); extern const struct path *collect_paths(const struct path *, struct path *, unsigned); extern void drop_collected_paths(const struct path *, const struct path *); extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num); extern int cifs_root_data(char **dev, char **opts); #endif /* _LINUX_MOUNT_H */
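A brief usage sketch for the write-access helpers declared above, assuming a caller that already holds a struct path; my_modify_inode() is a placeholder for whatever change the caller actually makes.

/*
 * Sketch (not from mount.h): the usual pairing of mnt_want_write() and
 * mnt_drop_write() around a modification on a mount.
 */
#include <linux/mount.h>
#include <linux/path.h>

static int my_update_file(const struct path *path, struct inode *inode)
{
	int err;

	err = mnt_want_write(path->mnt);	/* fails if the mount is read-only */
	if (err)
		return err;

	err = my_modify_inode(inode);		/* placeholder for the real change */

	mnt_drop_write(path->mnt);		/* always balance mnt_want_write() */
	return err;
}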
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UDF_DECL_H
#define __UDF_DECL_H

#define pr_fmt(fmt) "UDF-fs: " fmt

#include "ecma_167.h"
#include "osta_udf.h"

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/udf_fs_i.h>

#include "udf_sb.h"
#include "udfend.h"
#include "udf_i.h"

#define UDF_DEFAULT_PREALLOC_BLOCKS 8

extern __printf(3, 4) void _udf_err(struct super_block *sb,
		const char *function, const char *fmt, ...);
#define udf_err(sb, fmt, ...) \
	_udf_err(sb, __func__, fmt, ##__VA_ARGS__)

extern __printf(3, 4) void _udf_warn(struct super_block *sb,
		const char *function, const char *fmt, ...);
#define udf_warn(sb, fmt, ...) \
	_udf_warn(sb, __func__, fmt, ##__VA_ARGS__)

#define udf_info(fmt, ...) \
	pr_info("INFO " fmt, ##__VA_ARGS__)

#define udf_debug(fmt, ...) \
	pr_debug("%s:%d:%s: " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__)

#define UDF_EXTENT_LENGTH_MASK 0x3FFFFFFF
#define UDF_EXTENT_FLAG_MASK 0xC0000000

#define UDF_INVALID_ID ((uint32_t)-1)

#define UDF_NAME_PAD 4
#define UDF_NAME_LEN 254
#define UDF_NAME_LEN_CS0 255

static inline size_t udf_file_entry_alloc_offset(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->i_use)
		return sizeof(struct unallocSpaceEntry);
	else if (iinfo->i_efe)
		return sizeof(struct extendedFileEntry) + iinfo->i_lenEAttr;
	else
		return sizeof(struct fileEntry) + iinfo->i_lenEAttr;
}

static inline size_t udf_ext0_offset(struct inode *inode)
{
	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		return udf_file_entry_alloc_offset(inode);
	else
		return 0;
}

/* computes tag checksum */
u8 udf_tag_checksum(const struct tag *t);

typedef uint32_t udf_pblk_t;

struct dentry;
struct inode;
struct task_struct;
struct buffer_head;
struct super_block;

extern const struct export_operations udf_export_ops;
extern const struct inode_operations udf_dir_inode_operations;
extern const struct file_operations udf_dir_operations;
extern const struct inode_operations udf_file_inode_operations;
extern const struct file_operations udf_file_operations;
extern const struct inode_operations udf_symlink_inode_operations;
extern const struct address_space_operations udf_aops;
extern const struct address_space_operations udf_symlink_aops;

struct udf_fileident_iter {
	struct inode *dir;		/* Directory we are working with */
	loff_t pos;			/* Logical position in a dir */
	struct buffer_head *bh[2];	/* Buffer containing 'pos' and possibly
					 * next buffer if entry straddles
					 * blocks */
	struct kernel_lb_addr eloc;	/* Start of extent containing 'pos' */
	uint32_t elen;			/* Length of extent containing 'pos' */
sector_t loffset; /* Block offset of 'pos' within above * extent */ struct extent_position epos; /* Position after the above extent */ struct fileIdentDesc fi; /* Copied directory entry */ uint8_t *name; /* Pointer to entry name */ uint8_t *namebuf; /* Storage for entry name in case * the name is split between two blocks */ }; struct udf_vds_record { uint32_t block; uint32_t volDescSeqNum; }; struct generic_desc { struct tag descTag; __le32 volDescSeqNum; }; /* super.c */ static inline void udf_updated_lvid(struct super_block *sb) { struct buffer_head *bh = UDF_SB(sb)->s_lvid_bh; BUG_ON(!bh); WARN_ON_ONCE(((struct logicalVolIntegrityDesc *) bh->b_data)->integrityType != cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN)); UDF_SB(sb)->s_lvid_dirty = 1; } extern u64 lvid_get_unique_id(struct super_block *sb); struct inode *udf_find_metadata_inode_efe(struct super_block *sb, u32 meta_file_loc, u32 partition_num); /* namei.c */ static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi) { return ALIGN(sizeof(struct fileIdentDesc) + le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent, UDF_NAME_PAD); } /* file.c */ extern long udf_ioctl(struct file *, unsigned int, unsigned long); /* inode.c */ extern struct inode *__udf_iget(struct super_block *, struct kernel_lb_addr *, bool hidden_inode); static inline struct inode *udf_iget_special(struct super_block *sb, struct kernel_lb_addr *ino) { return __udf_iget(sb, ino, true); } static inline struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino) { return __udf_iget(sb, ino, false); } extern int udf_expand_file_adinicb(struct inode *); extern struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block, int create, int *err); extern int udf_setsize(struct inode *, loff_t); extern void udf_evict_inode(struct inode *); extern int udf_write_inode(struct inode *, struct writeback_control *wbc); extern int inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos, struct kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset, int8_t *etype); int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); extern int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block, struct extent_position *epos); extern int __udf_add_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t elen, int inc); extern int udf_add_aext(struct inode *, struct extent_position *, struct kernel_lb_addr *, uint32_t, int); extern void udf_write_aext(struct inode *, struct extent_position *, struct kernel_lb_addr *, uint32_t, int); extern int8_t udf_delete_aext(struct inode *, struct extent_position); extern int udf_next_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t *elen, int8_t *etype, int inc); extern int udf_current_aext(struct inode *inode, struct extent_position *epos, struct kernel_lb_addr *eloc, uint32_t *elen, int8_t *etype, int inc); extern void udf_update_extra_perms(struct inode *inode, umode_t mode); /* misc.c */ extern struct genericFormat *udf_add_extendedattr(struct inode *, uint32_t, uint32_t, uint8_t); extern struct genericFormat *udf_get_extendedattr(struct inode *, uint32_t, uint8_t); extern struct buffer_head *udf_read_tagged(struct super_block *, uint32_t, uint32_t, uint16_t *); extern struct buffer_head *udf_read_ptagged(struct super_block *, struct kernel_lb_addr *, uint32_t, uint16_t *); extern void udf_update_tag(char *, int); extern void udf_new_tag(char *, uint16_t, uint16_t, uint16_t, uint32_t, int); /* 
lowlevel.c */ extern unsigned int udf_get_last_session(struct super_block *); udf_pblk_t udf_get_last_block(struct super_block *); /* partition.c */ extern uint32_t udf_get_pblock(struct super_block *, uint32_t, uint16_t, uint32_t); extern uint32_t udf_get_pblock_virt15(struct super_block *, uint32_t, uint16_t, uint32_t); extern uint32_t udf_get_pblock_virt20(struct super_block *, uint32_t, uint16_t, uint32_t); extern uint32_t udf_get_pblock_spar15(struct super_block *, uint32_t, uint16_t, uint32_t); extern uint32_t udf_get_pblock_meta25(struct super_block *, uint32_t, uint16_t, uint32_t); extern int udf_relocate_blocks(struct super_block *, long, long *); static inline uint32_t udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc, uint32_t offset) { return udf_get_pblock(sb, loc->logicalBlockNum, loc->partitionReferenceNum, offset); } /* unicode.c */ extern int udf_get_filename(struct super_block *, const uint8_t *, int, uint8_t *, int); extern int udf_put_filename(struct super_block *, const uint8_t *, int, uint8_t *, int); extern int udf_dstrCS0toChar(struct super_block *, uint8_t *, int, const uint8_t *, int); /* ialloc.c */ extern void udf_free_inode(struct inode *); extern struct inode *udf_new_inode(struct inode *, umode_t); /* truncate.c */ extern void udf_truncate_tail_extent(struct inode *); extern void udf_discard_prealloc(struct inode *); extern int udf_truncate_extents(struct inode *); /* balloc.c */ extern void udf_free_blocks(struct super_block *, struct inode *, struct kernel_lb_addr *, uint32_t, uint32_t); extern int udf_prealloc_blocks(struct super_block *, struct inode *, uint16_t, uint32_t, uint32_t); extern udf_pblk_t udf_new_block(struct super_block *sb, struct inode *inode, uint16_t partition, uint32_t goal, int *err); /* directory.c */ int udf_fiiter_init(struct udf_fileident_iter *iter, struct inode *dir, loff_t pos); int udf_fiiter_advance(struct udf_fileident_iter *iter); void udf_fiiter_release(struct udf_fileident_iter *iter); void udf_fiiter_write_fi(struct udf_fileident_iter *iter, uint8_t *impuse); void udf_fiiter_update_elen(struct udf_fileident_iter *iter, uint32_t new_elen); int udf_fiiter_append_blk(struct udf_fileident_iter *iter); extern struct long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int); extern struct short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int); /* udftime.c */ extern void udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src); extern void udf_time_to_disk_stamp(struct timestamp *dest, struct timespec64 src); #endif /* __UDF_DECL_H */
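A hedged sketch of how the directory iterator declared above is typically driven; the termination test against dir->i_size is an assumption modelled on the UDF readdir loop, and my_walk_dir() is an invented name.

/*
 * Illustrative only: visit every directory entry of 'dir' with the
 * udf_fiiter_* helpers declared above.
 */
static int my_walk_dir(struct inode *dir)
{
	struct udf_fileident_iter iter;
	int ret;

	for (ret = udf_fiiter_init(&iter, dir, 0);
	     !ret && iter.pos < dir->i_size;
	     ret = udf_fiiter_advance(&iter)) {
		/* iter.fi and iter.name describe the current entry */
	}
	udf_fiiter_release(&iter);
	return ret;
}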
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/cls_route.c ROUTE4 classifier. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/dst.h> #include <net/route.h> #include <net/netlink.h> #include <net/act_api.h> #include <net/pkt_cls.h> #include <net/tc_wrapper.h> /* * 1. For now we assume that route tags < 256. * It allows to use direct table lookups, instead of hash tables. * 2.
For now we assume that "from TAG" and "fromdev DEV" statements * are mutually exclusive. * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX" */ struct route4_fastmap { struct route4_filter *filter; u32 id; int iif; }; struct route4_head { struct route4_fastmap fastmap[16]; struct route4_bucket __rcu *table[256 + 1]; struct rcu_head rcu; }; struct route4_bucket { /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */ struct route4_filter __rcu *ht[16 + 16 + 1]; struct rcu_head rcu; }; struct route4_filter { struct route4_filter __rcu *next; u32 id; int iif; struct tcf_result res; struct tcf_exts exts; u32 handle; struct route4_bucket *bkt; struct tcf_proto *tp; struct rcu_work rwork; }; #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) static inline int route4_fastmap_hash(u32 id, int iif) { return id & 0xF; } static DEFINE_SPINLOCK(fastmap_lock); static void route4_reset_fastmap(struct route4_head *head) { spin_lock_bh(&fastmap_lock); memset(head->fastmap, 0, sizeof(head->fastmap)); spin_unlock_bh(&fastmap_lock); } static void route4_set_fastmap(struct route4_head *head, u32 id, int iif, struct route4_filter *f) { int h = route4_fastmap_hash(id, iif); /* fastmap updates must look atomic to aling id, iff, filter */ spin_lock_bh(&fastmap_lock); head->fastmap[h].id = id; head->fastmap[h].iif = iif; head->fastmap[h].filter = f; spin_unlock_bh(&fastmap_lock); } static inline int route4_hash_to(u32 id) { return id & 0xFF; } static inline int route4_hash_from(u32 id) { return (id >> 16) & 0xF; } static inline int route4_hash_iif(int iif) { return 16 + ((iif >> 16) & 0xF); } static inline int route4_hash_wild(void) { return 32; } #define ROUTE4_APPLY_RESULT() \ { \ *res = f->res; \ if (tcf_exts_has_actions(&f->exts)) { \ int r = tcf_exts_exec(skb, &f->exts, res); \ if (r < 0) { \ dont_cache = 1; \ continue; \ } \ return r; \ } else if (!dont_cache) \ route4_set_fastmap(head, id, iif, f); \ return 0; \ } TC_INDIRECT_SCOPE int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { struct route4_head *head = rcu_dereference_bh(tp->root); struct dst_entry *dst; struct route4_bucket *b; struct route4_filter *f; u32 id, h; int iif, dont_cache = 0; dst = skb_dst(skb); if (!dst) goto failure; id = dst->tclassid; iif = inet_iif(skb); h = route4_fastmap_hash(id, iif); spin_lock(&fastmap_lock); if (id == head->fastmap[h].id && iif == head->fastmap[h].iif && (f = head->fastmap[h].filter) != NULL) { if (f == ROUTE4_FAILURE) { spin_unlock(&fastmap_lock); goto failure; } *res = f->res; spin_unlock(&fastmap_lock); return 0; } spin_unlock(&fastmap_lock); h = route4_hash_to(id); restart: b = rcu_dereference_bh(head->table[h]); if (b) { for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]); f; f = rcu_dereference_bh(f->next)) if (f->id == id) ROUTE4_APPLY_RESULT(); for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]); f; f = rcu_dereference_bh(f->next)) if (f->iif == iif) ROUTE4_APPLY_RESULT(); for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]); f; f = rcu_dereference_bh(f->next)) ROUTE4_APPLY_RESULT(); } if (h < 256) { h = 256; id &= ~0xFFFF; goto restart; } if (!dont_cache) route4_set_fastmap(head, id, iif, ROUTE4_FAILURE); failure: return -1; } static inline u32 to_hash(u32 id) { u32 h = id & 0xFF; if (id & 0x8000) h += 256; return h; } static inline u32 from_hash(u32 id) { id &= 0xFFFF; if (id == 0xFFFF) return 32; if (!(id & 0x8000)) { if (id > 255) return 256; return id & 0xF; } return 16 + (id & 0xF); } static void *route4_get(struct 
tcf_proto *tp, u32 handle) { struct route4_head *head = rtnl_dereference(tp->root); struct route4_bucket *b; struct route4_filter *f; unsigned int h1, h2; h1 = to_hash(handle); if (h1 > 256) return NULL; h2 = from_hash(handle >> 16); if (h2 > 32) return NULL; b = rtnl_dereference(head->table[h1]); if (b) { for (f = rtnl_dereference(b->ht[h2]); f; f = rtnl_dereference(f->next)) if (f->handle == handle) return f; } return NULL; } static int route4_init(struct tcf_proto *tp) { struct route4_head *head; head = kzalloc_obj(struct route4_head); if (head == NULL) return -ENOBUFS; rcu_assign_pointer(tp->root, head); return 0; } static void __route4_delete_filter(struct route4_filter *f) { tcf_exts_destroy(&f->exts); tcf_exts_put_net(&f->exts); kfree(f); } static void route4_delete_filter_work(struct work_struct *work) { struct route4_filter *f = container_of(to_rcu_work(work), struct route4_filter, rwork); rtnl_lock(); __route4_delete_filter(f); rtnl_unlock(); } static void route4_queue_work(struct route4_filter *f) { tcf_queue_work(&f->rwork, route4_delete_filter_work); } static void route4_destroy(struct tcf_proto *tp, bool rtnl_held, struct netlink_ext_ack *extack) { struct route4_head *head = rtnl_dereference(tp->root); int h1, h2; if (head == NULL) return; for (h1 = 0; h1 <= 256; h1++) { struct route4_bucket *b; b = rtnl_dereference(head->table[h1]); if (b) { for (h2 = 0; h2 <= 32; h2++) { struct route4_filter *f; while ((f = rtnl_dereference(b->ht[h2])) != NULL) { struct route4_filter *next; next = rtnl_dereference(f->next); RCU_INIT_POINTER(b->ht[h2], next); tcf_unbind_filter(tp, &f->res); if (tcf_exts_get_net(&f->exts)) route4_queue_work(f); else __route4_delete_filter(f); } } RCU_INIT_POINTER(head->table[h1], NULL); kfree_rcu(b, rcu); } } kfree_rcu(head, rcu); } static int route4_delete(struct tcf_proto *tp, void *arg, bool *last, bool rtnl_held, struct netlink_ext_ack *extack) { struct route4_head *head = rtnl_dereference(tp->root); struct route4_filter *f = arg; struct route4_filter __rcu **fp; struct route4_filter *nf; struct route4_bucket *b; unsigned int h = 0; int i, h1; if (!head || !f) return -EINVAL; h = f->handle; b = f->bkt; fp = &b->ht[from_hash(h >> 16)]; for (nf = rtnl_dereference(*fp); nf; fp = &nf->next, nf = rtnl_dereference(*fp)) { if (nf == f) { /* unlink it */ RCU_INIT_POINTER(*fp, rtnl_dereference(f->next)); /* Remove any fastmap lookups that might ref filter * notice we unlink'd the filter so we can't get it * back in the fastmap. 
*/ route4_reset_fastmap(head); /* Delete it */ tcf_unbind_filter(tp, &f->res); tcf_exts_get_net(&f->exts); tcf_queue_work(&f->rwork, route4_delete_filter_work); /* Strip RTNL protected tree */ for (i = 0; i <= 32; i++) { struct route4_filter *rt; rt = rtnl_dereference(b->ht[i]); if (rt) goto out; } /* OK, session has no flows */ RCU_INIT_POINTER(head->table[to_hash(h)], NULL); kfree_rcu(b, rcu); break; } } out: *last = true; for (h1 = 0; h1 <= 256; h1++) { if (rcu_access_pointer(head->table[h1])) { *last = false; break; } } return 0; } static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = { [TCA_ROUTE4_CLASSID] = { .type = NLA_U32 }, [TCA_ROUTE4_TO] = NLA_POLICY_MAX(NLA_U32, 0xFF), [TCA_ROUTE4_FROM] = NLA_POLICY_MAX(NLA_U32, 0xFF), [TCA_ROUTE4_IIF] = NLA_POLICY_MAX(NLA_U32, 0x7FFF), }; static int route4_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct route4_filter *f, u32 handle, struct route4_head *head, struct nlattr **tb, struct nlattr *est, int new, u32 flags, struct netlink_ext_ack *extack) { u32 id = 0, to = 0, nhandle = 0x8000; struct route4_filter *fp; unsigned int h1; struct route4_bucket *b; int err; err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack); if (err < 0) return err; if (tb[TCA_ROUTE4_TO]) { if (new && handle & 0x8000) { NL_SET_ERR_MSG(extack, "Invalid handle"); return -EINVAL; } to = nla_get_u32(tb[TCA_ROUTE4_TO]); nhandle = to; } if (tb[TCA_ROUTE4_FROM] && tb[TCA_ROUTE4_IIF]) { NL_SET_ERR_MSG_ATTR(extack, tb[TCA_ROUTE4_FROM], "'from' and 'fromif' are mutually exclusive"); return -EINVAL; } if (tb[TCA_ROUTE4_FROM]) { id = nla_get_u32(tb[TCA_ROUTE4_FROM]); nhandle |= id << 16; } else if (tb[TCA_ROUTE4_IIF]) { id = nla_get_u32(tb[TCA_ROUTE4_IIF]); nhandle |= (id | 0x8000) << 16; } else nhandle |= 0xFFFF << 16; if (handle && new) { nhandle |= handle & 0x7F00; if (nhandle != handle) { NL_SET_ERR_MSG_FMT(extack, "Handle mismatch constructed: %x (expected: %x)", handle, nhandle); return -EINVAL; } } if (!nhandle) { NL_SET_ERR_MSG(extack, "Replacing with handle of 0 is invalid"); return -EINVAL; } h1 = to_hash(nhandle); b = rtnl_dereference(head->table[h1]); if (!b) { b = kzalloc_obj(struct route4_bucket); if (b == NULL) return -ENOBUFS; rcu_assign_pointer(head->table[h1], b); } else { unsigned int h2 = from_hash(nhandle >> 16); for (fp = rtnl_dereference(b->ht[h2]); fp; fp = rtnl_dereference(fp->next)) if (fp->handle == f->handle) return -EEXIST; } if (tb[TCA_ROUTE4_TO]) f->id = to; if (tb[TCA_ROUTE4_FROM]) f->id = to | id<<16; else if (tb[TCA_ROUTE4_IIF]) f->iif = id; f->handle = nhandle; f->bkt = b; f->tp = tp; if (tb[TCA_ROUTE4_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]); tcf_bind_filter(tp, &f->res, base); } return 0; } static int route4_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, void **arg, u32 flags, struct netlink_ext_ack *extack) { struct route4_head *head = rtnl_dereference(tp->root); struct route4_filter __rcu **fp; struct route4_filter *fold, *f1, *pfp, *f = NULL; struct route4_bucket *b; struct nlattr *tb[TCA_ROUTE4_MAX + 1]; unsigned int h, th; int err; bool new = true; if (!handle) { NL_SET_ERR_MSG(extack, "Creating with handle of 0 is invalid"); return -EINVAL; } if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) { NL_SET_ERR_MSG_MOD(extack, "Missing options"); return -EINVAL; } err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, tca[TCA_OPTIONS], route4_policy, NULL); if (err < 0) return err; fold = 
*arg; if (fold && fold->handle != handle) return -EINVAL; err = -ENOBUFS; f = kzalloc_obj(struct route4_filter); if (!f) goto errout; err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); if (err < 0) goto errout; if (fold) { f->id = fold->id; f->iif = fold->iif; f->handle = fold->handle; f->tp = fold->tp; f->bkt = fold->bkt; new = false; } err = route4_set_parms(net, tp, base, f, handle, head, tb, tca[TCA_RATE], new, flags, extack); if (err < 0) goto errout; h = from_hash(f->handle >> 16); fp = &f->bkt->ht[h]; for (pfp = rtnl_dereference(*fp); (f1 = rtnl_dereference(*fp)) != NULL; fp = &f1->next) if (f->handle < f1->handle) break; tcf_block_netif_keep_dst(tp->chain->block); rcu_assign_pointer(f->next, f1); rcu_assign_pointer(*fp, f); if (fold) { th = to_hash(fold->handle); h = from_hash(fold->handle >> 16); b = rtnl_dereference(head->table[th]); if (b) { fp = &b->ht[h]; for (pfp = rtnl_dereference(*fp); pfp; fp = &pfp->next, pfp = rtnl_dereference(*fp)) { if (pfp == fold) { rcu_assign_pointer(*fp, fold->next); break; } } } } route4_reset_fastmap(head); *arg = f; if (fold) { tcf_unbind_filter(tp, &fold->res); tcf_exts_get_net(&fold->exts); tcf_queue_work(&fold->rwork, route4_delete_filter_work); } return 0; errout: if (f) tcf_exts_destroy(&f->exts); kfree(f); return err; } static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg, bool rtnl_held) { struct route4_head *head = rtnl_dereference(tp->root); unsigned int h, h1; if (head == NULL || arg->stop) return; for (h = 0; h <= 256; h++) { struct route4_bucket *b = rtnl_dereference(head->table[h]); if (b) { for (h1 = 0; h1 <= 32; h1++) { struct route4_filter *f; for (f = rtnl_dereference(b->ht[h1]); f; f = rtnl_dereference(f->next)) { if (!tc_cls_stats_dump(tp, arg, f)) return; } } } } } static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) { struct route4_filter *f = fh; struct nlattr *nest; u32 id; if (f == NULL) return skb->len; t->tcm_handle = f->handle; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; if (!(f->handle & 0x8000)) { id = f->id & 0xFF; if (nla_put_u32(skb, TCA_ROUTE4_TO, id)) goto nla_put_failure; } if (f->handle & 0x80000000) { if ((f->handle >> 16) != 0xFFFF && nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif)) goto nla_put_failure; } else { id = f->id >> 16; if (nla_put_u32(skb, TCA_ROUTE4_FROM, id)) goto nla_put_failure; } if (f->res.classid && nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid)) goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts) < 0) goto nla_put_failure; nla_nest_end(skb, nest); if (tcf_exts_dump_stats(skb, &f->exts) < 0) goto nla_put_failure; return skb->len; nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q, unsigned long base) { struct route4_filter *f = fh; tc_cls_bind_class(classid, cl, q, &f->res, base); } static struct tcf_proto_ops cls_route4_ops __read_mostly = { .kind = "route", .classify = route4_classify, .init = route4_init, .destroy = route4_destroy, .get = route4_get, .change = route4_change, .delete = route4_delete, .walk = route4_walk, .dump = route4_dump, .bind_class = route4_bind_class, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_CLS("route"); static int __init init_route4(void) { return register_tcf_proto_ops(&cls_route4_ops); } static void __exit exit_route4(void) { unregister_tcf_proto_ops(&cls_route4_ops); } module_init(init_route4) module_exit(exit_route4) 
MODULE_DESCRIPTION("Routing table realm based TC classifier"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/page_io.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Swap reorganised 29.12.95, * Asynchronous swapping added 30.12.95. Stephen Tweedie * Removed race in async swapping. 14.4.1996. Bruno Haible * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie * Always use brw_page, life becomes simpler.
12 May 1998 Eric Biederman */ #include <linux/mm.h> #include <linux/kernel_stat.h> #include <linux/gfp.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/bio.h> #include <linux/swapops.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/psi.h> #include <linux/uio.h> #include <linux/sched/task.h> #include <linux/delayacct.h> #include <linux/zswap.h> #include "swap.h" static void __end_swap_bio_write(struct bio *bio) { struct folio *folio = bio_first_folio_all(bio); if (bio->bi_status) { /* * We failed to write the page out to swap-space. * Re-dirty the page in order to avoid it being reclaimed. * Also print a dire warning that things will go BAD (tm) * very quickly. * * Also clear PG_reclaim to avoid folio_rotate_reclaimable() */ folio_mark_dirty(folio); pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), (unsigned long long)bio->bi_iter.bi_sector); folio_clear_reclaim(folio); } folio_end_writeback(folio); } static void end_swap_bio_write(struct bio *bio) { __end_swap_bio_write(bio); bio_put(bio); } static void __end_swap_bio_read(struct bio *bio) { struct folio *folio = bio_first_folio_all(bio); if (bio->bi_status) { pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), (unsigned long long)bio->bi_iter.bi_sector); } else { folio_mark_uptodate(folio); } folio_unlock(folio); } static void end_swap_bio_read(struct bio *bio) { __end_swap_bio_read(bio); bio_put(bio); } int generic_swapfile_activate(struct swap_info_struct *sis, struct file *swap_file, sector_t *span) { struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; unsigned blocks_per_page; unsigned long page_no; unsigned blkbits; sector_t probe_block; sector_t last_block; sector_t lowest_block = -1; sector_t highest_block = 0; int nr_extents = 0; int ret; blkbits = inode->i_blkbits; blocks_per_page = PAGE_SIZE >> blkbits; /* * Map all the blocks into the extent tree. This code doesn't try * to be very smart. 
*/ probe_block = 0; page_no = 0; last_block = i_size_read(inode) >> blkbits; while ((probe_block + blocks_per_page) <= last_block && page_no < sis->max) { unsigned block_in_page; sector_t first_block; cond_resched(); first_block = probe_block; ret = bmap(inode, &first_block); if (ret || !first_block) goto bad_bmap; /* * It must be PAGE_SIZE aligned on-disk */ if (first_block & (blocks_per_page - 1)) { probe_block++; goto reprobe; } for (block_in_page = 1; block_in_page < blocks_per_page; block_in_page++) { sector_t block; block = probe_block + block_in_page; ret = bmap(inode, &block); if (ret || !block) goto bad_bmap; if (block != first_block + block_in_page) { /* Discontiguity */ probe_block++; goto reprobe; } } first_block >>= (PAGE_SHIFT - blkbits); if (page_no) { /* exclude the header page */ if (first_block < lowest_block) lowest_block = first_block; if (first_block > highest_block) highest_block = first_block; } /* * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks */ ret = add_swap_extent(sis, page_no, 1, first_block); if (ret < 0) goto out; nr_extents += ret; page_no++; probe_block += blocks_per_page; reprobe: continue; } ret = nr_extents; *span = 1 + highest_block - lowest_block; if (page_no == 0) page_no = 1; /* force Empty message */ sis->max = page_no; sis->pages = page_no - 1; out: return ret; bad_bmap: pr_err("swapon: swapfile has holes\n"); ret = -EINVAL; goto out; } static bool is_folio_zero_filled(struct folio *folio) { unsigned int pos, last_pos; unsigned long *data; unsigned int i; last_pos = PAGE_SIZE / sizeof(*data) - 1; for (i = 0; i < folio_nr_pages(folio); i++) { data = kmap_local_folio(folio, i * PAGE_SIZE); /* * Check last word first, incase the page is zero-filled at * the start and has non-zero data at the end, which is common * in real-world workloads. */ if (data[last_pos]) { kunmap_local(data); return false; } for (pos = 0; pos < last_pos; pos++) { if (data[pos]) { kunmap_local(data); return false; } } kunmap_local(data); } return true; } static void swap_zeromap_folio_set(struct folio *folio) { struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio); struct swap_info_struct *sis = __swap_entry_to_info(folio->swap); int nr_pages = folio_nr_pages(folio); swp_entry_t entry; unsigned int i; for (i = 0; i < folio_nr_pages(folio); i++) { entry = page_swap_entry(folio_page(folio, i)); set_bit(swp_offset(entry), sis->zeromap); } count_vm_events(SWPOUT_ZERO, nr_pages); if (objcg) { count_objcg_events(objcg, SWPOUT_ZERO, nr_pages); obj_cgroup_put(objcg); } } static void swap_zeromap_folio_clear(struct folio *folio) { struct swap_info_struct *sis = __swap_entry_to_info(folio->swap); swp_entry_t entry; unsigned int i; for (i = 0; i < folio_nr_pages(folio); i++) { entry = page_swap_entry(folio_page(folio, i)); clear_bit(swp_offset(entry), sis->zeromap); } } /* * We may have stale swap cache pages in memory: notice * them here and get rid of the unnecessary final write. */ int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug) { int ret = 0; if (folio_free_swap(folio)) goto out_unlock; /* * Arch code may have to preserve more data than just the page * contents, e.g. memory tags. */ ret = arch_prepare_to_swap(folio); if (ret) { folio_mark_dirty(folio); goto out_unlock; } /* * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages. * The bits in zeromap are protected by the locked swapcache folio * and atomic updates are used to protect against read-modify-write * corruption due to other zero swap entries seeing concurrent updates. 
*/ if (is_folio_zero_filled(folio)) { swap_zeromap_folio_set(folio); goto out_unlock; } /* * Clear bits this folio occupies in the zeromap to prevent zero data * being read in from any previous zero writes that occupied the same * swap entries. */ swap_zeromap_folio_clear(folio); if (zswap_store(folio)) { count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT); goto out_unlock; } if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) { folio_mark_dirty(folio); return AOP_WRITEPAGE_ACTIVATE; } __swap_writepage(folio, swap_plug); return 0; out_unlock: folio_unlock(folio); return ret; } static inline void count_swpout_vm_event(struct folio *folio) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (unlikely(folio_test_pmd_mappable(folio))) { count_memcg_folio_events(folio, THP_SWPOUT, 1); count_vm_event(THP_SWPOUT); } #endif count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT); count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio)); count_vm_events(PSWPOUT, folio_nr_pages(folio)); } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio) { struct cgroup_subsys_state *css; struct mem_cgroup *memcg; memcg = folio_memcg(folio); if (!memcg) return; rcu_read_lock(); css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys); bio_associate_blkg_from_css(bio, css); rcu_read_unlock(); } #else #define bio_associate_blkg_from_page(bio, folio) do { } while (0) #endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */ struct swap_iocb { struct kiocb iocb; struct bio_vec bvec[SWAP_CLUSTER_MAX]; int pages; int len; }; static mempool_t *sio_pool; int sio_pool_init(void) { if (!sio_pool) { mempool_t *pool = mempool_create_kmalloc_pool( SWAP_CLUSTER_MAX, sizeof(struct swap_iocb)); if (cmpxchg(&sio_pool, NULL, pool)) mempool_destroy(pool); } if (!sio_pool) return -ENOMEM; return 0; } static void sio_write_complete(struct kiocb *iocb, long ret) { struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb); struct page *page = sio->bvec[0].bv_page; int p; if (ret != sio->len) { /* * In the case of swap-over-nfs, this can be a * temporary failure if the system has limited * memory for allocating transmit buffers. * Mark the page dirty and avoid * folio_rotate_reclaimable but rate-limit the * messages. */ pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n", ret, swap_dev_pos(page_swap_entry(page))); for (p = 0; p < sio->pages; p++) { page = sio->bvec[p].bv_page; set_page_dirty(page); ClearPageReclaim(page); } } for (p = 0; p < sio->pages; p++) end_page_writeback(sio->bvec[p].bv_page); mempool_free(sio, sio_pool); } static void swap_writepage_fs(struct folio *folio, struct swap_iocb **swap_plug) { struct swap_iocb *sio = swap_plug ? 
*swap_plug : NULL; struct swap_info_struct *sis = __swap_entry_to_info(folio->swap); struct file *swap_file = sis->swap_file; loff_t pos = swap_dev_pos(folio->swap); count_swpout_vm_event(folio); folio_start_writeback(folio); folio_unlock(folio); if (sio) { if (sio->iocb.ki_filp != swap_file || sio->iocb.ki_pos + sio->len != pos) { swap_write_unplug(sio); sio = NULL; } } if (!sio) { sio = mempool_alloc(sio_pool, GFP_NOIO); init_sync_kiocb(&sio->iocb, swap_file); sio->iocb.ki_complete = sio_write_complete; sio->iocb.ki_pos = pos; sio->pages = 0; sio->len = 0; } bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0); sio->len += folio_size(folio); sio->pages += 1; if (sio->pages == ARRAY_SIZE(sio->bvec) || !swap_plug) { swap_write_unplug(sio); sio = NULL; } if (swap_plug) *swap_plug = sio; } static void swap_writepage_bdev_sync(struct folio *folio, struct swap_info_struct *sis) { struct bio_vec bv; struct bio bio; bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP); bio.bi_iter.bi_sector = swap_folio_sector(folio); bio_add_folio_nofail(&bio, folio, folio_size(folio), 0); bio_associate_blkg_from_page(&bio, folio); count_swpout_vm_event(folio); folio_start_writeback(folio); folio_unlock(folio); submit_bio_wait(&bio); __end_swap_bio_write(&bio); } static void swap_writepage_bdev_async(struct folio *folio, struct swap_info_struct *sis) { struct bio *bio; bio = bio_alloc(sis->bdev, 1, REQ_OP_WRITE | REQ_SWAP, GFP_NOIO); bio->bi_iter.bi_sector = swap_folio_sector(folio); bio->bi_end_io = end_swap_bio_write; bio_add_folio_nofail(bio, folio, folio_size(folio), 0); bio_associate_blkg_from_page(bio, folio); count_swpout_vm_event(folio); folio_start_writeback(folio); folio_unlock(folio); submit_bio(bio); } void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug) { struct swap_info_struct *sis = __swap_entry_to_info(folio->swap); VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio); /* * ->flags can be updated non-atomically (scan_swap_map_slots), * but that will never affect SWP_FS_OPS, so the data_race * is safe. */ if (data_race(sis->flags & SWP_FS_OPS)) swap_writepage_fs(folio, swap_plug); /* * ->flags can be updated non-atomically (scan_swap_map_slots), * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race * is safe. 
*/ else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO)) swap_writepage_bdev_sync(folio, sis); else swap_writepage_bdev_async(folio, sis); } void swap_write_unplug(struct swap_iocb *sio) { struct iov_iter from; struct address_space *mapping = sio->iocb.ki_filp->f_mapping; int ret; iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len); ret = mapping->a_ops->swap_rw(&sio->iocb, &from); if (ret != -EIOCBQUEUED) sio_write_complete(&sio->iocb, ret); } static void sio_read_complete(struct kiocb *iocb, long ret) { struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb); int p; if (ret == sio->len) { for (p = 0; p < sio->pages; p++) { struct folio *folio = page_folio(sio->bvec[p].bv_page); count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN); count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio)); folio_mark_uptodate(folio); folio_unlock(folio); } count_vm_events(PSWPIN, sio->pages); } else { for (p = 0; p < sio->pages; p++) { struct folio *folio = page_folio(sio->bvec[p].bv_page); folio_unlock(folio); } pr_alert_ratelimited("Read-error on swap-device\n"); } mempool_free(sio, sio_pool); } static bool swap_read_folio_zeromap(struct folio *folio) { int nr_pages = folio_nr_pages(folio); struct obj_cgroup *objcg; bool is_zeromap; /* * Swapping in a large folio that is partially in the zeromap is not * currently handled. Return true without marking the folio uptodate so * that an IO error is emitted (e.g. do_swap_page() will sigbus). */ if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages, &is_zeromap) != nr_pages)) return true; if (!is_zeromap) return false; objcg = get_obj_cgroup_from_folio(folio); count_vm_events(SWPIN_ZERO, nr_pages); if (objcg) { count_objcg_events(objcg, SWPIN_ZERO, nr_pages); obj_cgroup_put(objcg); } folio_zero_range(folio, 0, folio_size(folio)); folio_mark_uptodate(folio); return true; } static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug) { struct swap_info_struct *sis = __swap_entry_to_info(folio->swap); struct swap_iocb *sio = NULL; loff_t pos = swap_dev_pos(folio->swap); if (plug) sio = *plug; if (sio) { if (sio->iocb.ki_filp != sis->swap_file || sio->iocb.ki_pos + sio->len != pos) { swap_read_unplug(sio); sio = NULL; } } if (!sio) { sio = mempool_alloc(sio_pool, GFP_KERNEL); init_sync_kiocb(&sio->iocb, sis->swap_file); sio->iocb.ki_pos = pos; sio->iocb.ki_complete = sio_read_complete; sio->pages = 0; sio->len = 0; } bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0); sio->len += folio_size(folio); sio->pages += 1; if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) { swap_read_unplug(sio); sio = NULL; } if (plug) *plug = sio; } static void swap_read_folio_bdev_sync(struct folio *folio, struct swap_info_struct *sis) { struct bio_vec bv; struct bio bio; bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ); bio.bi_iter.bi_sector = swap_folio_sector(folio); bio_add_folio_nofail(&bio, folio, folio_size(folio), 0); /* * Keep this task valid during swap readpage because the oom killer may * attempt to access it in the page fault retry time check. 
*/ get_task_struct(current); count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN); count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio)); count_vm_events(PSWPIN, folio_nr_pages(folio)); submit_bio_wait(&bio); __end_swap_bio_read(&bio); put_task_struct(current); } static void swap_read_folio_bdev_async(struct folio *folio, struct swap_info_struct *sis) { struct bio *bio; bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL); bio->bi_iter.bi_sector = swap_folio_sector(folio); bio->bi_end_io = end_swap_bio_read; bio_add_folio_nofail(bio, folio, folio_size(folio), 0); count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN); count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio)); count_vm_events(PSWPIN, folio_nr_pages(folio)); submit_bio(bio); } void swap_read_folio(struct folio *folio, struct swap_iocb **plug) { struct swap_info_struct *sis = __swap_entry_to_info(folio->swap); bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO; bool workingset = folio_test_workingset(folio); unsigned long pflags; bool in_thrashing; VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio); /* * Count submission time as memory stall and delay. When the device * is congested, or the submitting cgroup IO-throttled, submission * can be a significant part of overall IO time. */ if (workingset) { delayacct_thrashing_start(&in_thrashing); psi_memstall_enter(&pflags); } delayacct_swapin_start(); if (swap_read_folio_zeromap(folio)) { folio_unlock(folio); goto finish; } if (zswap_load(folio) != -ENOENT) goto finish; /* We have to read from slower devices. Increase zswap protection. */ zswap_folio_swapin(folio); if (data_race(sis->flags & SWP_FS_OPS)) { swap_read_folio_fs(folio, plug); } else if (synchronous) { swap_read_folio_bdev_sync(folio, sis); } else { swap_read_folio_bdev_async(folio, sis); } finish: if (workingset) { delayacct_thrashing_end(&in_thrashing); psi_memstall_leave(&pflags); } delayacct_swapin_end(); } void __swap_read_unplug(struct swap_iocb *sio) { struct iov_iter from; struct address_space *mapping = sio->iocb.ki_filp->f_mapping; int ret; iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len); ret = mapping->a_ops->swap_rw(&sio->iocb, &from); if (ret != -EIOCBQUEUED) sio_read_complete(&sio->iocb, ret); }
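The zeromap path above (is_folio_zero_filled(), swap_zeromap_folio_set()/_clear() and swap_read_folio_zeromap()) skips block IO entirely for all-zero pages: a per-device bitmap remembers which swap slots hold zero pages, and reading such a slot just refills the folio with zeroes. The userspace model below, with a hypothetical 64-slot swap area, sketches that bookkeeping; it illustrates the idea and is not the kernel implementation.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdalign.h>

#define PAGE_SZ		4096
#define NR_SLOTS	64	/* hypothetical tiny swap area */

static uint64_t zeromap;	/* bit n set => slot n holds an all-zero page */

static bool page_is_zero(const unsigned char *page)
{
	const uint64_t *w = (const uint64_t *)page;
	size_t last = PAGE_SZ / sizeof(*w) - 1;

	/* Like is_folio_zero_filled(): check the last word first, since pages
	 * that start zeroed but carry data at the end are common. */
	if (w[last])
		return false;
	for (size_t i = 0; i < last; i++)
		if (w[i])
			return false;
	return true;
}

static void swap_out(unsigned int slot, const unsigned char *page)
{
	if (page_is_zero(page)) {
		zeromap |= 1ULL << slot;	/* remember it, skip the write */
		return;
	}
	zeromap &= ~(1ULL << slot);		/* clear any stale zero bit */
	/* ... a real implementation would submit the write here ... */
}

static void swap_in(unsigned int slot, unsigned char *page)
{
	if (zeromap & (1ULL << slot)) {
		memset(page, 0, PAGE_SZ);	/* no IO needed */
		return;
	}
	/* ... a real implementation would submit the read here ... */
}

int main(void)
{
	alignas(8) unsigned char page[PAGE_SZ] = { 0 };

	swap_out(3, page);		/* all zero: only the bitmap is touched */
	page[0] = 0xff;
	swap_in(3, page);		/* zero bit set: page is re-zeroed, no IO */
	printf("slot 3 zero bit: %d, first byte: %u\n",
	       !!(zeromap & (1ULL << 3)), page[0]);
	return 0;
}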
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * NetLabel Network Address Lists * * This file contains network address list functions used to manage ordered * lists of network addresses for use by the NetLabel subsystem. The NetLabel * system manages static and dynamic label mappings for network protocols such * as CIPSO and RIPSO. * * Author: Paul Moore <paul@paul-moore.com> */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2008 */ #ifndef _NETLABEL_ADDRLIST_H #define _NETLABEL_ADDRLIST_H #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/in6.h> #include <linux/audit.h> /** * struct netlbl_af4list - NetLabel IPv4 address list * @addr: IPv4 address * @mask: IPv4 address mask * @valid: valid flag * @list: list structure, used internally */ struct netlbl_af4list { __be32 addr; __be32 mask; u32 valid; struct list_head list; }; /** * struct netlbl_af6list - NetLabel IPv6 address list * @addr: IPv6 address * @mask: IPv6 address mask * @valid: valid flag * @list: list structure, used internally */ struct netlbl_af6list { struct in6_addr addr; struct in6_addr mask; u32 valid; struct list_head list; }; #define __af4list_entry(ptr) container_of(ptr, struct netlbl_af4list, list) static inline struct netlbl_af4list *__af4list_valid(struct list_head *s, struct list_head *h) { struct list_head *i = s; struct netlbl_af4list *n = __af4list_entry(s); while (i != h && !n->valid) { i = i->next; n = __af4list_entry(i); } return n; } static inline struct netlbl_af4list *__af4list_valid_rcu(struct list_head *s, struct list_head *h) { struct list_head *i = s; struct netlbl_af4list *n = __af4list_entry(s); while (i != h && !n->valid) { i = rcu_dereference(list_next_rcu(i)); n = __af4list_entry(i); } return n; } #define netlbl_af4list_foreach(iter, head) \ for (iter = __af4list_valid((head)->next, head); \ &iter->list != (head); \ iter = __af4list_valid(iter->list.next, head)) #define netlbl_af4list_foreach_rcu(iter, head) \ for (iter = __af4list_valid_rcu((head)->next, head); \ &iter->list != (head); \ iter = __af4list_valid_rcu(iter->list.next, head)) #define netlbl_af4list_foreach_safe(iter, tmp, head) \ for (iter = __af4list_valid((head)->next, head), \ tmp = __af4list_valid(iter->list.next, head); \ &iter->list != (head); \ iter = tmp, tmp = __af4list_valid(iter->list.next, head)) int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head); struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask, struct list_head *head); void netlbl_af4list_remove_entry(struct netlbl_af4list *entry); struct netlbl_af4list *netlbl_af4list_search(__be32 addr, struct list_head *head); struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr, __be32 mask, struct list_head *head); #ifdef CONFIG_AUDIT void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, int src,
const char *dev, __be32 addr, __be32 mask); #else static inline void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, __be32 addr, __be32 mask) { } #endif #if IS_ENABLED(CONFIG_IPV6) #define __af6list_entry(ptr) container_of(ptr, struct netlbl_af6list, list) static inline struct netlbl_af6list *__af6list_valid(struct list_head *s, struct list_head *h) { struct list_head *i = s; struct netlbl_af6list *n = __af6list_entry(s); while (i != h && !n->valid) { i = i->next; n = __af6list_entry(i); } return n; } static inline struct netlbl_af6list *__af6list_valid_rcu(struct list_head *s, struct list_head *h) { struct list_head *i = s; struct netlbl_af6list *n = __af6list_entry(s); while (i != h && !n->valid) { i = rcu_dereference(list_next_rcu(i)); n = __af6list_entry(i); } return n; } #define netlbl_af6list_foreach(iter, head) \ for (iter = __af6list_valid((head)->next, head); \ &iter->list != (head); \ iter = __af6list_valid(iter->list.next, head)) #define netlbl_af6list_foreach_rcu(iter, head) \ for (iter = __af6list_valid_rcu((head)->next, head); \ &iter->list != (head); \ iter = __af6list_valid_rcu(iter->list.next, head)) #define netlbl_af6list_foreach_safe(iter, tmp, head) \ for (iter = __af6list_valid((head)->next, head), \ tmp = __af6list_valid(iter->list.next, head); \ &iter->list != (head); \ iter = tmp, tmp = __af6list_valid(iter->list.next, head)) int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head); struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head); void netlbl_af6list_remove_entry(struct netlbl_af6list *entry); struct netlbl_af6list *netlbl_af6list_search(const struct in6_addr *addr, struct list_head *head); struct netlbl_af6list *netlbl_af6list_search_exact(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head); #ifdef CONFIG_AUDIT void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, const struct in6_addr *addr, const struct in6_addr *mask); #else static inline void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, const struct in6_addr *addr, const struct in6_addr *mask) { } #endif #endif /* IPV6 */ #endif
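The search helpers declared above rely on the lists being kept ordered from most- to least-specific mask, so netlbl_af4list_search() can return the best match by taking the first valid entry whose masked address compares equal. As a rough standalone illustration (a plain array instead of a kernel list_head, with made-up entries), the sketch below performs the same masked-compare, first-match-wins lookup.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct addr4_entry {
	uint32_t addr;	/* network byte order, like the __be32 addr above */
	uint32_t mask;	/* network byte order, like the __be32 mask above */
	int valid;	/* mirrors the 'valid' flag skipped by the iterators */
};

/* First valid match wins because the table is ordered most-specific first. */
static const struct addr4_entry *
addr4_search(const struct addr4_entry *tbl, size_t n, uint32_t addr)
{
	for (size_t i = 0; i < n; i++) {
		if (!tbl[i].valid)
			continue;
		if ((addr & tbl[i].mask) == tbl[i].addr)
			return &tbl[i];
	}
	return NULL;
}

int main(void)
{
	const struct addr4_entry tbl[] = {
		{ htonl(0xc0a80100), htonl(0xffffff00), 1 },	/* 192.168.1.0/24 */
		{ htonl(0xc0a80000), htonl(0xffff0000), 1 },	/* 192.168.0.0/16 */
		{ 0,                 0,                 1 },	/* 0.0.0.0/0      */
	};
	const struct addr4_entry *e;

	e = addr4_search(tbl, sizeof(tbl) / sizeof(tbl[0]), htonl(0xc0a80105));
	printf("matched mask 0x%08x\n", e ? ntohl(e->mask) : 0u);
	return 0;
}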
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module implementing an IP set type: the hash:ip type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <net/tcp.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 /* 1 Counters support */ /* 2 Comments support */ /* 3 Forceadd support */ /* 4 skbinfo support */ /* 5 bucketsize, initval support */ #define IPSET_TYPE_REV_MAX 6 /* bitmask support */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:ip"); /* Type specific function prefix */ #define HTYPE hash_ip #define IP_SET_HASH_WITH_NETMASK #define IP_SET_HASH_WITH_BITMASK /* IPv4 variant */ /* Member elements */ struct hash_ip4_elem { /* Zero valued IP addresses cannot be stored */ __be32 ip; }; /* Common functions */ static bool hash_ip4_data_equal(const struct hash_ip4_elem *e1, const struct hash_ip4_elem *e2, u32 *multi) { return e1->ip == e2->ip; } static bool hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *e) { if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip)) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_ip4_data_next(struct hash_ip4_elem *next, const struct hash_ip4_elem *e) { next->ip = e->ip; } #define MTYPE hash_ip4 #define HOST_MASK 32 #include "ip_set_hash_gen.h" static int hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_ip4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ip4_elem e = { 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); __be32 ip; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip); ip &= h->bitmask.ip; if (ip == 0) return -EINVAL; e.ip = ip; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int
hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct hash_ip4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ip4_elem e = { 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, hosts, i = 0; int ret = 0; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP])) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; ip &= ntohl(h->bitmask.ip); e.ip = htonl(ip); if (e.ip == 0) return -IPSET_ERR_HASH_ELEM; if (adt == IPSET_TEST) return adtfn(set, &e, &ext, &ext, flags); ip_to = ip; if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip > ip_to) { if (ip_to == 0) return -IPSET_ERR_HASH_ELEM; swap(ip, ip_to); } } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); } hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); if (retried) ip = ntohl(h->next.ip); for (; ip <= ip_to; i++) { e.ip = htonl(ip); if (i > IPSET_MAX_RANGE) { hash_ip4_data_next(&h->next, &e); return -ERANGE; } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ip += hosts; if (ip == 0) return 0; ret = 0; } return ret; } /* IPv6 variant */ /* Member elements */ struct hash_ip6_elem { union nf_inet_addr ip; }; /* Common functions */ static bool hash_ip6_data_equal(const struct hash_ip6_elem *ip1, const struct hash_ip6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6); } static bool hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *e) { if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6)) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_ip6_data_next(struct hash_ip6_elem *next, const struct hash_ip6_elem *e) { } #undef MTYPE #undef HOST_MASK #define MTYPE hash_ip6 #define HOST_MASK 128 #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_ip6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ip6_elem e = { { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); nf_inet_addr_mask_inplace(&e.ip, &h->bitmask); if (ipv6_addr_any(&e.ip.in6)) return -EINVAL; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct hash_ip6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ip6_elem e = { { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP])) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; if (unlikely(tb[IPSET_ATTR_CIDR])) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (cidr != HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if 
(ret) return ret; nf_inet_addr_mask_inplace(&e.ip, &h->bitmask); if (ipv6_addr_any(&e.ip.in6)) return -IPSET_ERR_HASH_ELEM; ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_eexist(ret, flags) ? 0 : ret; } static struct ip_set_type hash_ip_type __read_mostly = { .name = "hash:ip", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP, .dimension = IPSET_DIM_ONE, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE, .create = hash_ip_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_INITVAL] = { .type = NLA_U32 }, [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_NETMASK] = { .type = NLA_U8 }, [IPSET_ATTR_BITMASK] = { .type = NLA_NESTED }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_ip_init(void) { return ip_set_type_register(&hash_ip_type); } static void __exit hash_ip_fini(void) { rcu_barrier(); ip_set_type_unregister(&hash_ip_type); } module_init(hash_ip_init); module_exit(hash_ip_fini);
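When the set was created with a netmask, hash_ip4_uadt() above expands an IP range (or CIDR) by stepping one netmask-sized block at a time and storing only the masked address of each block. The standalone sketch below walks the same arithmetic with made-up addresses and a netmask of 24; it is an illustration, not part of the module.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int netmask = 24;		/* set-level netmask (illustrative) */
	uint32_t ip    = 0xc0a80000;		/* 192.168.0.0, host byte order */
	uint32_t ip_to = 0xc0a8ffff;		/* ... through 192.168.255.255 */
	uint32_t mask  = netmask == 32 ? 0xffffffffu
				       : ~((1u << (32 - netmask)) - 1);
	/* Same step the kernel uses: 2^(32 - netmask) addresses per element. */
	uint32_t hosts = netmask == 32 ? 1 : 2u << (32 - netmask - 1);
	unsigned long added = 0;

	/* One masked element per block; stop on wrap-around (ip == 0), like
	 * the "if (ip == 0) return 0;" check in hash_ip4_uadt(). */
	for (ip &= mask; ip && ip <= ip_to; ip += hosts)
		added++;

	printf("netmask %u: %lu elements, step %u addresses\n",
	       netmask, added, hosts);
	return 0;
}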
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich */ #include "send.h" #include "main.h" #include <linux/atomic.h> #include <linux/bug.h> #include <linux/byteorder/generic.h> #include <linux/container_of.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/gfp.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/jiffies.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/printk.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/workqueue.h> #include "distributed-arp-table.h" #include "fragmentation.h" #include "gateway_client.h" #include "hard-interface.h" #include "log.h" #include "mesh-interface.h" #include "originator.h" #include "routing.h" #include "translation-table.h" static void batadv_send_outstanding_bcast_packet(struct work_struct *work); /** * batadv_send_skb_packet() - send an already prepared packet * @skb: the packet to send * @hard_iface: the interface to use to send the broadcast packet * @dst_addr: the payload destination * * Send out an already prepared packet to the given neighbor or broadcast it * using the specified interface. Either hard_iface or neigh_node must be not * NULL. * If neigh_node is NULL, then the packet is broadcasted using hard_iface, * otherwise it is sent as unicast to the given neighbor. * * Regardless of the return value, the skb is consumed. * * Return: A negative errno code is returned on a failure. A success does not * guarantee the frame will be transmitted as it may be dropped due * to congestion or traffic shaping. */ int batadv_send_skb_packet(struct sk_buff *skb, struct batadv_hard_iface *hard_iface, const u8 *dst_addr) { struct ethhdr *ethhdr; int ret; if (hard_iface->if_status != BATADV_IF_ACTIVE) goto send_skb_err; if (unlikely(!hard_iface->net_dev)) goto send_skb_err; if (!(hard_iface->net_dev->flags & IFF_UP)) { pr_warn("Interface %s is not up - can't send packet via that interface!\n", hard_iface->net_dev->name); goto send_skb_err; } /* push to the ethernet header.
*/ if (batadv_skb_head_push(skb, ETH_HLEN) < 0) goto send_skb_err; skb_reset_mac_header(skb); ethhdr = eth_hdr(skb); ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr); ether_addr_copy(ethhdr->h_dest, dst_addr); ethhdr->h_proto = htons(ETH_P_BATMAN); skb_set_network_header(skb, ETH_HLEN); skb->protocol = htons(ETH_P_BATMAN); skb->dev = hard_iface->net_dev; /* dev_queue_xmit() returns a negative result on error. However on * congestion and traffic shaping, it drops and returns NET_XMIT_DROP * (which is > 0). This will not be treated as an error. */ ret = dev_queue_xmit(skb); return net_xmit_eval(ret); send_skb_err: kfree_skb(skb); return NET_XMIT_DROP; } /** * batadv_send_broadcast_skb() - Send broadcast packet via hard interface * @skb: packet to be transmitted (with batadv header and no outer eth header) * @hard_iface: outgoing interface * * Return: A negative errno code is returned on a failure. A success does not * guarantee the frame will be transmitted as it may be dropped due * to congestion or traffic shaping. */ int batadv_send_broadcast_skb(struct sk_buff *skb, struct batadv_hard_iface *hard_iface) { static const u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; return batadv_send_skb_packet(skb, hard_iface, broadcast_addr); } /** * batadv_send_unicast_skb() - Send unicast packet to neighbor * @skb: packet to be transmitted (with batadv header and no outer eth header) * @neigh: neighbor which is used as next hop to destination * * Return: A negative errno code is returned on a failure. A success does not * guarantee the frame will be transmitted as it may be dropped due * to congestion or traffic shaping. */ int batadv_send_unicast_skb(struct sk_buff *skb, struct batadv_neigh_node *neigh) { #ifdef CONFIG_BATMAN_ADV_BATMAN_V struct batadv_hardif_neigh_node *hardif_neigh; #endif int ret; ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr); #ifdef CONFIG_BATMAN_ADV_BATMAN_V hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr); if (hardif_neigh && ret != NET_XMIT_DROP) hardif_neigh->bat_v.last_unicast_tx = jiffies; batadv_hardif_neigh_put(hardif_neigh); #endif return ret; } /** * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb. * @skb: Packet to be transmitted. * @orig_node: Final destination of the packet. * @recv_if: Interface used when receiving the packet (can be NULL). * * Looks up the best next-hop towards the passed originator and passes the * skb on for preparation of MAC header. If the packet originated from this * host, NULL can be passed as recv_if and no interface alternating is * attempted. * * Return: negative errno code on a failure, -EINPROGRESS if the skb is * buffered for later transmit or the NET_XMIT status returned by the * lower routine if the packet has been passed down. */ int batadv_send_skb_to_orig(struct sk_buff *skb, struct batadv_orig_node *orig_node, struct batadv_hard_iface *recv_if) { struct batadv_priv *bat_priv = orig_node->bat_priv; struct batadv_neigh_node *neigh_node; int ret; /* batadv_find_router() increases neigh_nodes refcount if found. */ neigh_node = batadv_find_router(bat_priv, orig_node, recv_if); if (!neigh_node) { ret = -EINVAL; goto free_skb; } /* Check if the skb is too large to send in one piece and fragment * it if needed. */ if (atomic_read(&bat_priv->fragmentation) && skb->len > neigh_node->if_incoming->net_dev->mtu) { /* Fragment and send packet. 
*/ ret = batadv_frag_send_packet(skb, orig_node, neigh_node); /* skb was consumed */ skb = NULL; goto put_neigh_node; } ret = batadv_send_unicast_skb(skb, neigh_node); /* skb was consumed */ skb = NULL; put_neigh_node: batadv_neigh_node_put(neigh_node); free_skb: kfree_skb(skb); return ret; } /** * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the * common fields for unicast packets * @skb: the skb carrying the unicast header to initialize * @hdr_size: amount of bytes to push at the beginning of the skb * @orig_node: the destination node * * Return: false if the buffer extension was not possible or true otherwise. */ static bool batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size, struct batadv_orig_node *orig_node) { struct batadv_unicast_packet *unicast_packet; u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn); if (batadv_skb_head_push(skb, hdr_size) < 0) return false; unicast_packet = (struct batadv_unicast_packet *)skb->data; unicast_packet->version = BATADV_COMPAT_VERSION; /* batman packet type: unicast */ unicast_packet->packet_type = BATADV_UNICAST; /* set unicast ttl */ unicast_packet->ttl = BATADV_TTL; /* copy the destination for faster routing */ ether_addr_copy(unicast_packet->dest, orig_node->orig); /* set the destination tt version number */ unicast_packet->ttvn = ttvn; return true; } /** * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header * @skb: the skb containing the payload to encapsulate * @orig_node: the destination node * * Return: false if the payload could not be encapsulated or true otherwise. */ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb, struct batadv_orig_node *orig_node) { size_t uni_size = sizeof(struct batadv_unicast_packet); return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node); } /** * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a * unicast 4addr header * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb containing the payload to encapsulate * @orig: the destination node * @packet_subtype: the unicast 4addr packet subtype to use * * Return: false if the payload could not be encapsulated or true otherwise. */ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_orig_node *orig, int packet_subtype) { struct batadv_hard_iface *primary_if; struct batadv_unicast_4addr_packet *uc_4addr_packet; bool ret = false; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; /* Pull the header space and fill the unicast_packet substructure. 
* We can do that because the first member of the uc_4addr_packet * is of type struct unicast_packet */ if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet), orig)) goto out; uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR; ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr); uc_4addr_packet->subtype = packet_subtype; uc_4addr_packet->reserved = 0; ret = true; out: batadv_hardif_put(primary_if); return ret; } /** * batadv_send_skb_unicast() - encapsulate and send an skb via unicast * @bat_priv: the bat priv with all the mesh interface information * @skb: payload to send * @packet_type: the batman unicast packet type to use * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast * 4addr packets) * @orig_node: the originator to send the packet to * @vid: the vid to be used to search the translation table * * Wrap the given skb into a batman-adv unicast or unicast-4addr header * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied * as packet_type. Then send this frame to the given orig_node. * * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. */ int batadv_send_skb_unicast(struct batadv_priv *bat_priv, struct sk_buff *skb, int packet_type, int packet_subtype, struct batadv_orig_node *orig_node, unsigned short vid) { struct batadv_unicast_packet *unicast_packet; struct ethhdr *ethhdr; int ret = NET_XMIT_DROP; if (!orig_node) goto out; switch (packet_type) { case BATADV_UNICAST: if (!batadv_send_skb_prepare_unicast(skb, orig_node)) goto out; break; case BATADV_UNICAST_4ADDR: if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node, packet_subtype)) goto out; break; default: /* this function supports UNICAST and UNICAST_4ADDR only. It * should never be invoked with any other packet type */ goto out; } /* skb->data might have been reallocated by * batadv_send_skb_prepare_unicast{,_4addr}() */ ethhdr = eth_hdr(skb); unicast_packet = (struct batadv_unicast_packet *)skb->data; /* inform the destination node that we are still missing a correct route * for this client. The destination will receive this packet and will * try to reroute it because the ttvn contained in the header is less * than the current one */ if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) unicast_packet->ttvn = unicast_packet->ttvn - 1; ret = batadv_send_skb_to_orig(skb, orig_node, NULL); /* skb was consumed */ skb = NULL; out: kfree_skb(skb); return ret; } /** * batadv_send_skb_via_tt_generic() - send an skb via TT lookup * @bat_priv: the bat priv with all the mesh interface information * @skb: payload to send * @packet_type: the batman unicast packet type to use * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast * 4addr packets) * @dst_hint: can be used to override the destination contained in the skb * @vid: the vid to be used to search the translation table * * Look up the recipient node for the destination address in the ethernet * header via the translation table. Wrap the given skb into a batman-adv * unicast or unicast-4addr header depending on whether BATADV_UNICAST or * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame * to the according destination node. * * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. 
*/ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv, struct sk_buff *skb, int packet_type, int packet_subtype, u8 *dst_hint, unsigned short vid) { struct ethhdr *ethhdr = (struct ethhdr *)skb->data; struct batadv_orig_node *orig_node; u8 *src, *dst; int ret; src = ethhdr->h_source; dst = ethhdr->h_dest; /* if we got an hint! let's send the packet to this client (if any) */ if (dst_hint) { src = NULL; dst = dst_hint; } orig_node = batadv_transtable_search(bat_priv, src, dst, vid); ret = batadv_send_skb_unicast(bat_priv, skb, packet_type, packet_subtype, orig_node, vid); batadv_orig_node_put(orig_node); return ret; } /** * batadv_send_skb_via_gw() - send an skb via gateway lookup * @bat_priv: the bat priv with all the mesh interface information * @skb: payload to send * @vid: the vid to be used to search the translation table * * Look up the currently selected gateway. Wrap the given skb into a batman-adv * unicast header and send this frame to this gateway node. * * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. */ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) { struct batadv_orig_node *orig_node; int ret; orig_node = batadv_gw_get_selected_orig(bat_priv); ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, BATADV_P_DATA, orig_node, vid); batadv_orig_node_put(orig_node); return ret; } /** * batadv_forw_packet_free() - free a forwarding packet * @forw_packet: The packet to free * @dropped: whether the packet is freed because is dropped * * This frees a forwarding packet and releases any resources it might * have claimed. */ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet, bool dropped) { if (dropped) kfree_skb(forw_packet->skb); else consume_skb(forw_packet->skb); batadv_hardif_put(forw_packet->if_incoming); batadv_hardif_put(forw_packet->if_outgoing); if (forw_packet->queue_left) atomic_inc(forw_packet->queue_left); kfree(forw_packet); } /** * batadv_forw_packet_alloc() - allocate a forwarding packet * @if_incoming: The (optional) if_incoming to be grabbed * @if_outgoing: The (optional) if_outgoing to be grabbed * @queue_left: The (optional) queue counter to decrease * @bat_priv: The bat_priv for the mesh of this forw_packet * @skb: The raw packet this forwarding packet shall contain * * Allocates a forwarding packet and tries to get a reference to the * (optional) if_incoming, if_outgoing and queue_left. If queue_left * is NULL then bat_priv is optional, too. * * Return: An allocated forwarding packet on success, NULL otherwise. 
*/ struct batadv_forw_packet * batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, atomic_t *queue_left, struct batadv_priv *bat_priv, struct sk_buff *skb) { struct batadv_forw_packet *forw_packet; const char *qname; if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) { qname = "unknown"; if (queue_left == &bat_priv->bcast_queue_left) qname = "bcast"; if (queue_left == &bat_priv->batman_queue_left) qname = "batman"; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s queue is full\n", qname); return NULL; } forw_packet = kmalloc_obj(*forw_packet, GFP_ATOMIC); if (!forw_packet) goto err; if (if_incoming) kref_get(&if_incoming->refcount); if (if_outgoing) kref_get(&if_outgoing->refcount); INIT_HLIST_NODE(&forw_packet->list); INIT_HLIST_NODE(&forw_packet->cleanup_list); forw_packet->skb = skb; forw_packet->queue_left = queue_left; forw_packet->if_incoming = if_incoming; forw_packet->if_outgoing = if_outgoing; forw_packet->num_packets = 1; return forw_packet; err: if (queue_left) atomic_inc(queue_left); return NULL; } /** * batadv_forw_packet_was_stolen() - check whether someone stole this packet * @forw_packet: the forwarding packet to check * * This function checks whether the given forwarding packet was claimed by * someone else for free(). * * Return: True if someone stole it, false otherwise. */ static bool batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet) { return !hlist_unhashed(&forw_packet->cleanup_list); } /** * batadv_forw_packet_steal() - claim a forw_packet for free() * @forw_packet: the forwarding packet to steal * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock) * * This function tries to steal a specific forw_packet from global * visibility for the purpose of getting it for free(). That means * the caller is *not* allowed to requeue it afterwards. * * Return: True if stealing was successful. False if someone else stole it * before us. */ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet, spinlock_t *lock) { /* did purging routine steal it earlier? */ spin_lock_bh(lock); if (batadv_forw_packet_was_stolen(forw_packet)) { spin_unlock_bh(lock); return false; } hlist_del_init(&forw_packet->list); /* Just to spot misuse of this function */ hlist_add_fake(&forw_packet->cleanup_list); spin_unlock_bh(lock); return true; } /** * batadv_forw_packet_list_steal() - claim a list of forward packets for free() * @forw_list: the to be stolen forward packets * @cleanup_list: a backup pointer, to be able to dispose the packet later * @hard_iface: the interface to steal forward packets from * * This function claims responsibility to free any forw_packet queued on the * given hard_iface. If hard_iface is NULL forwarding packets on all hard * interfaces will be claimed. * * The packets are being moved from the forw_list to the cleanup_list. This * makes it possible for already running threads to notice the claim. 
*/ static void batadv_forw_packet_list_steal(struct hlist_head *forw_list, struct hlist_head *cleanup_list, const struct batadv_hard_iface *hard_iface) { struct batadv_forw_packet *forw_packet; struct hlist_node *safe_tmp_node; hlist_for_each_entry_safe(forw_packet, safe_tmp_node, forw_list, list) { /* if purge_outstanding_packets() was called with an argument * we delete only packets belonging to the given interface */ if (hard_iface && forw_packet->if_incoming != hard_iface && forw_packet->if_outgoing != hard_iface) continue; hlist_del(&forw_packet->list); hlist_add_head(&forw_packet->cleanup_list, cleanup_list); } } /** * batadv_forw_packet_list_free() - free a list of forward packets * @head: a list of to be freed forw_packets * * This function cancels the scheduling of any packet in the provided list, * waits for any possibly running packet forwarding thread to finish and * finally, safely frees this forward packet. * * This function might sleep. */ static void batadv_forw_packet_list_free(struct hlist_head *head) { struct batadv_forw_packet *forw_packet; struct hlist_node *safe_tmp_node; hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head, cleanup_list) { cancel_delayed_work_sync(&forw_packet->delayed_work); hlist_del(&forw_packet->cleanup_list); batadv_forw_packet_free(forw_packet, true); } } /** * batadv_forw_packet_queue() - try to queue a forwarding packet * @forw_packet: the forwarding packet to queue * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock) * @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list) * @send_time: timestamp (jiffies) when the packet is to be sent * * This function tries to (re)queue a forwarding packet. Requeuing * is prevented if the according interface is shutting down * (e.g. if batadv_forw_packet_list_steal() was called for this * packet earlier). * * Calling batadv_forw_packet_queue() after a call to * batadv_forw_packet_steal() is forbidden! * * Caller needs to ensure that forw_packet->delayed_work was initialized. */ static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet, spinlock_t *lock, struct hlist_head *head, unsigned long send_time) { spin_lock_bh(lock); /* did purging routine steal it from us? */ if (batadv_forw_packet_was_stolen(forw_packet)) { /* If you got it for free() without trouble, then * don't get back into the queue after stealing... */ WARN_ONCE(hlist_fake(&forw_packet->cleanup_list), "Requeuing after batadv_forw_packet_steal() not allowed!\n"); spin_unlock_bh(lock); return; } hlist_del_init(&forw_packet->list); hlist_add_head(&forw_packet->list, head); queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work, send_time - jiffies); spin_unlock_bh(lock); } /** * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet * @bat_priv: the bat priv with all the mesh interface information * @forw_packet: the forwarding packet to queue * @send_time: timestamp (jiffies) when the packet is to be sent * * This function tries to (re)queue a broadcast packet. * * Caller needs to ensure that forw_packet->delayed_work was initialized. 
*/ static void batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv, struct batadv_forw_packet *forw_packet, unsigned long send_time) { batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock, &bat_priv->forw_bcast_list, send_time); } /** * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet * @bat_priv: the bat priv with all the mesh interface information * @forw_packet: the forwarding packet to queue * @send_time: timestamp (jiffies) when the packet is to be sent * * This function tries to (re)queue an OGMv1 packet. * * Caller needs to ensure that forw_packet->delayed_work was initialized. */ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv, struct batadv_forw_packet *forw_packet, unsigned long send_time) { batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock, &bat_priv->forw_bat_list, send_time); } /** * batadv_forw_bcast_packet_to_list() - queue broadcast packet for transmissions * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * @if_in: the interface where the packet was received on * @if_out: the outgoing interface to queue on * * Adds a broadcast packet to the queue and sets up timers. Broadcast packets * are sent multiple times to increase probability for being received. * * This call clones the given skb, hence the caller needs to take into * account that the data segment of the original skb might not be * modifiable anymore. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors. */ static int batadv_forw_bcast_packet_to_list(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet, struct batadv_hard_iface *if_in, struct batadv_hard_iface *if_out) { struct batadv_forw_packet *forw_packet; unsigned long send_time = jiffies; struct sk_buff *newskb; newskb = skb_clone(skb, GFP_ATOMIC); if (!newskb) goto err; forw_packet = batadv_forw_packet_alloc(if_in, if_out, &bat_priv->bcast_queue_left, bat_priv, newskb); if (!forw_packet) goto err_packet_free; forw_packet->own = own_packet; INIT_DELAYED_WORK(&forw_packet->delayed_work, batadv_send_outstanding_bcast_packet); send_time += delay ? delay : msecs_to_jiffies(5); batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time); return NETDEV_TX_OK; err_packet_free: kfree_skb(newskb); err: return NETDEV_TX_BUSY; } /** * batadv_forw_bcast_packet_if() - forward and queue a broadcast packet * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * @if_in: the interface where the packet was received on * @if_out: the outgoing interface to forward to * * Transmits a broadcast packet on the specified interface either immediately * or if a delay is given after that. Furthermore, queues additional * retransmissions if this interface is a wireless one. * * This call clones the given skb, hence the caller needs to take into * account that the data segment of the original skb might not be * modifiable anymore. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors. 
*/ static int batadv_forw_bcast_packet_if(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet, struct batadv_hard_iface *if_in, struct batadv_hard_iface *if_out) { unsigned int num_bcasts = if_out->num_bcasts; struct sk_buff *newskb; int ret = NETDEV_TX_OK; if (!delay) { newskb = skb_clone(skb, GFP_ATOMIC); if (!newskb) return NETDEV_TX_BUSY; batadv_send_broadcast_skb(newskb, if_out); num_bcasts--; } /* delayed broadcast or rebroadcasts? */ if (num_bcasts >= 1) { BATADV_SKB_CB(skb)->num_bcasts = num_bcasts; ret = batadv_forw_bcast_packet_to_list(bat_priv, skb, delay, own_packet, if_in, if_out); } return ret; } /** * batadv_send_no_broadcast() - check whether (re)broadcast is necessary * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to check * @own_packet: true if it is a self-generated broadcast packet * @if_out: the outgoing interface checked and considered for (re)broadcast * * Return: False if a packet needs to be (re)broadcasted on the given interface, * true otherwise. */ static bool batadv_send_no_broadcast(struct batadv_priv *bat_priv, struct sk_buff *skb, bool own_packet, struct batadv_hard_iface *if_out) { struct batadv_hardif_neigh_node *neigh_node = NULL; struct batadv_bcast_packet *bcast_packet; u8 *orig_neigh; u8 *neigh_addr; char *type; int ret; if (!own_packet) { neigh_addr = eth_hdr(skb)->h_source; neigh_node = batadv_hardif_neigh_get(if_out, neigh_addr); } bcast_packet = (struct batadv_bcast_packet *)skb->data; orig_neigh = neigh_node ? neigh_node->orig : NULL; ret = batadv_hardif_no_broadcast(if_out, bcast_packet->orig, orig_neigh); batadv_hardif_neigh_put(neigh_node); /* ok, may broadcast */ if (!ret) return false; /* no broadcast */ switch (ret) { case BATADV_HARDIF_BCAST_NORECIPIENT: type = "no neighbor"; break; case BATADV_HARDIF_BCAST_DUPFWD: type = "single neighbor is source"; break; case BATADV_HARDIF_BCAST_DUPORIG: type = "single neighbor is originator"; break; default: type = "unknown"; } batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n", bcast_packet->orig, if_out->net_dev->name, type); return true; } /** * __batadv_forw_bcast_packet() - forward and queue a broadcast packet * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * * Transmits a broadcast packet either immediately or if a delay is given * after that. Furthermore, queues additional retransmissions on wireless * interfaces. * * This call clones the given skb, hence the caller needs to take into * account that the data segment of the given skb might not be * modifiable anymore. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors. 
*/ static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet) { struct batadv_hard_iface *hard_iface; struct batadv_hard_iface *primary_if; struct list_head *iter; int ret = NETDEV_TX_OK; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) return NETDEV_TX_BUSY; rcu_read_lock(); netdev_for_each_lower_private_rcu(bat_priv->mesh_iface, hard_iface, iter) { if (!kref_get_unless_zero(&hard_iface->refcount)) continue; if (batadv_send_no_broadcast(bat_priv, skb, own_packet, hard_iface)) { batadv_hardif_put(hard_iface); continue; } ret = batadv_forw_bcast_packet_if(bat_priv, skb, delay, own_packet, primary_if, hard_iface); batadv_hardif_put(hard_iface); if (ret == NETDEV_TX_BUSY) break; } rcu_read_unlock(); batadv_hardif_put(primary_if); return ret; } /** * batadv_forw_bcast_packet() - forward and queue a broadcast packet * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * * Transmits a broadcast packet either immediately or if a delay is given * after that. Furthermore, queues additional retransmissions on wireless * interfaces. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors. */ int batadv_forw_bcast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet) { return __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet); } /** * batadv_send_bcast_packet() - send and queue a broadcast packet * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * * Transmits a broadcast packet either immediately or if a delay is given * after that. Furthermore, queues additional retransmissions on wireless * interfaces. * * Consumes the provided skb. */ void batadv_send_bcast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet) { __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet); consume_skb(skb); } /** * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary * @forw_packet: the forwarding packet to check * * Checks whether a given packet has any (re)transmissions left on the provided * interface. * * hard_iface may be NULL: In that case the number of transmissions this skb had * so far is compared with the maximum amount of retransmissions independent of * any interface instead. * * Return: True if (re)transmissions are left, false otherwise. */ static bool batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet) { return BATADV_SKB_CB(forw_packet->skb)->num_bcasts; } /** * batadv_forw_packet_bcasts_dec() - decrement retransmission counter of a * packet * @forw_packet: the packet to decrease the counter for */ static void batadv_forw_packet_bcasts_dec(struct batadv_forw_packet *forw_packet) { BATADV_SKB_CB(forw_packet->skb)->num_bcasts--; } /** * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions * @forw_packet: the packet to check * * Return: True if this packet was transmitted before, false otherwise. 
*/ bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet) { unsigned char num_bcasts = BATADV_SKB_CB(forw_packet->skb)->num_bcasts; return num_bcasts != forw_packet->if_outgoing->num_bcasts; } /** * batadv_send_outstanding_bcast_packet() - transmit a queued broadcast packet * @work: work queue item * * Transmits a queued broadcast packet and if necessary reschedules it. */ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) { unsigned long send_time = jiffies + msecs_to_jiffies(5); struct batadv_forw_packet *forw_packet; struct delayed_work *delayed_work; struct batadv_priv *bat_priv; struct sk_buff *skb1; bool dropped = false; delayed_work = to_delayed_work(work); forw_packet = container_of(delayed_work, struct batadv_forw_packet, delayed_work); bat_priv = netdev_priv(forw_packet->if_incoming->mesh_iface); if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) { dropped = true; goto out; } if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) { dropped = true; goto out; } /* send a copy of the saved skb */ skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); if (!skb1) goto out; batadv_send_broadcast_skb(skb1, forw_packet->if_outgoing); batadv_forw_packet_bcasts_dec(forw_packet); if (batadv_forw_packet_bcasts_left(forw_packet)) { batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time); return; } out: /* do we get something for free()? */ if (batadv_forw_packet_steal(forw_packet, &bat_priv->forw_bcast_list_lock)) batadv_forw_packet_free(forw_packet, dropped); } /** * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets * @bat_priv: the bat priv with all the mesh interface information * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on * * This method cancels and purges any broadcast and OGMv1 packet on the given * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard * interfaces will be canceled and purged. * * This function might sleep. */ void batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, const struct batadv_hard_iface *hard_iface) { struct hlist_head head = HLIST_HEAD_INIT; if (hard_iface) batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s(): %s\n", __func__, hard_iface->net_dev->name); else batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s()\n", __func__); /* claim bcast list for free() */ spin_lock_bh(&bat_priv->forw_bcast_list_lock); batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head, hard_iface); spin_unlock_bh(&bat_priv->forw_bcast_list_lock); /* claim batman packet list for free() */ spin_lock_bh(&bat_priv->forw_bat_list_lock); batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head, hard_iface); spin_unlock_bh(&bat_priv->forw_bat_list_lock); /* then cancel or wait for packet workers to finish and free */ batadv_forw_packet_list_free(&head); }
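/*
 * Editor's usage sketch (not part of the original send.c): it illustrates how
 * the forw_packet helpers above are typically combined to queue a delayed
 * broadcast, mirroring batadv_forw_bcast_packet_to_list(). The function name
 * example_queue_bcast() is hypothetical and only serves as an illustration;
 * callers normally also set BATADV_SKB_CB(skb)->num_bcasts beforehand, as
 * batadv_forw_bcast_packet_if() does.
 */
static int __maybe_unused example_queue_bcast(struct batadv_priv *bat_priv,
					      struct sk_buff *skb,
					      struct batadv_hard_iface *if_out)
{
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *newskb;

	/* work on a private clone so the caller's skb stays usable */
	newskb = skb_clone(skb, GFP_ATOMIC);
	if (!newskb)
		return NETDEV_TX_BUSY;

	/* take an interface reference and a broadcast queue slot */
	forw_packet = batadv_forw_packet_alloc(NULL, if_out,
					       &bat_priv->bcast_queue_left,
					       bat_priv, newskb);
	if (!forw_packet) {
		kfree_skb(newskb);
		return NETDEV_TX_BUSY;
	}
	forw_packet->own = true;

	/* the delayed work must be initialized before (re)queueing */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	/* schedule the first transmission roughly 5 ms from now */
	batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
				       jiffies + msecs_to_jiffies(5));
	return NETDEV_TX_OK;
}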
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * linux/drivers/char/serial_core.h * * Copyright (C) 2000 Deep Blue Solutions Ltd. */ #ifndef LINUX_SERIAL_CORE_H #define LINUX_SERIAL_CORE_H #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/console.h> #include <linux/interrupt.h> #include <linux/lockdep.h> #include <linux/printk.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/mutex.h> #include <linux/sysrq.h> #include <uapi/linux/serial_core.h> #ifdef CONFIG_SERIAL_CORE_CONSOLE #define uart_console(port) \ ((port)->cons && (port)->cons->index == (port)->line) #else #define uart_console(port) ({ (void)port; 0; }) #endif struct uart_port; struct serial_struct; struct serial_port_device; struct device; struct gpio_desc; /** * struct uart_ops -- interface between serial_core and the driver * * This structure describes all the operations that can be done on the * physical hardware. * * @tx_empty: ``unsigned int ()(struct uart_port *port)`` * * This function tests whether the transmitter fifo and shifter for the * @port is empty. If it is empty, this function should return * %TIOCSER_TEMT, otherwise return 0. If the port does not support this * operation, then it should return %TIOCSER_TEMT. * * Locking: none. * Interrupts: caller dependent. * This call must not sleep * * @set_mctrl: ``void ()(struct uart_port *port, unsigned int mctrl)`` * * This function sets the modem control lines for @port to the state * described by @mctrl. The relevant bits of @mctrl are: * * - %TIOCM_RTS RTS signal. 
* - %TIOCM_DTR DTR signal. * - %TIOCM_OUT1 OUT1 signal. * - %TIOCM_OUT2 OUT2 signal. * - %TIOCM_LOOP Set the port into loopback mode. * * If the appropriate bit is set, the signal should be driven * active. If the bit is clear, the signal should be driven * inactive. * * Locking: @port->lock taken. * Interrupts: locally disabled. * This call must not sleep * * @get_mctrl: ``unsigned int ()(struct uart_port *port)`` * * Returns the current state of modem control inputs of @port. The state * of the outputs should not be returned, since the core keeps track of * their state. The state information should include: * * - %TIOCM_CAR state of DCD signal * - %TIOCM_CTS state of CTS signal * - %TIOCM_DSR state of DSR signal * - %TIOCM_RI state of RI signal * * The bit is set if the signal is currently driven active. If * the port does not support CTS, DCD or DSR, the driver should * indicate that the signal is permanently active. If RI is * not available, the signal should not be indicated as active. * * Locking: @port->lock taken. * Interrupts: locally disabled. * This call must not sleep * * @stop_tx: ``void ()(struct uart_port *port)`` * * Stop transmitting characters. This might be due to the CTS line * becoming inactive or the tty layer indicating we want to stop * transmission due to an %XOFF character. * * The driver should stop transmitting characters as soon as possible. * * Locking: @port->lock taken. * Interrupts: locally disabled. * This call must not sleep * * @start_tx: ``void ()(struct uart_port *port)`` * * Start transmitting characters. * * Locking: @port->lock taken. * Interrupts: locally disabled. * This call must not sleep * * @throttle: ``void ()(struct uart_port *port)`` * * Notify the serial driver that input buffers for the line discipline are * close to full, and it should somehow signal that no more characters * should be sent to the serial port. * This will be called only if hardware assisted flow control is enabled. * * Locking: serialized with @unthrottle() and termios modification by the * tty layer. * * @unthrottle: ``void ()(struct uart_port *port)`` * * Notify the serial driver that characters can now be sent to the serial * port without fear of overrunning the input buffers of the line * disciplines. * * This will be called only if hardware assisted flow control is enabled. * * Locking: serialized with @throttle() and termios modification by the * tty layer. * * @send_xchar: ``void ()(struct uart_port *port, char ch)`` * * Transmit a high priority character, even if the port is stopped. This * is used to implement XON/XOFF flow control and tcflow(). If the serial * driver does not implement this function, the tty core will append the * character to the circular buffer and then call start_tx() / stop_tx() * to flush the data out. * * Do not transmit if @ch == '\0' (%__DISABLED_CHAR). * * Locking: none. * Interrupts: caller dependent. * * @start_rx: ``void ()(struct uart_port *port)`` * * Start receiving characters. * * Locking: @port->lock taken. * Interrupts: locally disabled. * This call must not sleep * * @stop_rx: ``void ()(struct uart_port *port)`` * * Stop receiving characters; the @port is in the process of being closed. * * Locking: @port->lock taken. * Interrupts: locally disabled. * This call must not sleep * * @enable_ms: ``void ()(struct uart_port *port)`` * * Enable the modem status interrupts. * * This method may be called multiple times. Modem status interrupts * should be disabled when the @shutdown() method is called. * * Locking: @port->lock taken. 
* Interrupts: locally disabled. * This call must not sleep * * @break_ctl: ``void ()(struct uart_port *port, int ctl)`` * * Control the transmission of a break signal. If @ctl is nonzero, the * break signal should be transmitted. The signal should be terminated * when another call is made with a zero @ctl. * * Locking: caller holds tty_port->mutex * * @startup: ``int ()(struct uart_port *port)`` * * Grab any interrupt resources and initialise any low level driver state. * Enable the port for reception. It should not activate RTS nor DTR; * this will be done via a separate call to @set_mctrl(). * * This method will only be called when the port is initially opened. * * Locking: port_sem taken. * Interrupts: globally disabled. * * @shutdown: ``void ()(struct uart_port *port)`` * * Disable the @port, disable any break condition that may be in effect, * and free any interrupt resources. It should not disable RTS nor DTR; * this will have already been done via a separate call to @set_mctrl(). * * Drivers must not access @port->state once this call has completed. * * This method will only be called when there are no more users of this * @port. * * Locking: port_sem taken. * Interrupts: caller dependent. * * @flush_buffer: ``void ()(struct uart_port *port)`` * * Flush any write buffers, reset any DMA state and stop any ongoing DMA * transfers. * * This will be called whenever the @port->state->xmit circular buffer is * cleared. * * Locking: @port->lock taken. * Interrupts: locally disabled. * This call must not sleep * * @set_termios: ``void ()(struct uart_port *port, struct ktermios *new, * struct ktermios *old)`` * * Change the @port parameters, including word length, parity, stop bits. * Update @port->read_status_mask and @port->ignore_status_mask to * indicate the types of events we are interested in receiving. Relevant * ktermios::c_cflag bits are: * * - %CSIZE - word size * - %CSTOPB - 2 stop bits * - %PARENB - parity enable * - %PARODD - odd parity (when %PARENB is in force) * - %ADDRB - address bit (changed through uart_port::rs485_config()). * - %CREAD - enable reception of characters (if not set, still receive * characters from the port, but throw them away). * - %CRTSCTS - if set, enable CTS status change reporting. * - %CLOCAL - if not set, enable modem status change reporting. * * Relevant ktermios::c_iflag bits are: * * - %INPCK - enable frame and parity error events to be passed to the TTY * layer. * - %BRKINT / %PARMRK - both of these enable break events to be passed to * the TTY layer. * - %IGNPAR - ignore parity and framing errors. * - %IGNBRK - ignore break errors. If %IGNPAR is also set, ignore overrun * errors as well. * * The interaction of the ktermios::c_iflag bits is as follows (parity * error given as an example): * * ============ ======= ======= ========================================= * Parity error INPCK IGNPAR * ============ ======= ======= ========================================= * n/a 0 n/a character received, marked as %TTY_NORMAL * None 1 n/a character received, marked as %TTY_NORMAL * Yes 1 0 character received, marked as %TTY_PARITY * Yes 1 1 character discarded * ============ ======= ======= ========================================= * * Other flags may be used (eg, xon/xoff characters) if your hardware * supports hardware "soft" flow control. * * Locking: caller holds tty_port->mutex * Interrupts: caller dependent. * This call must not sleep * * @set_ldisc: ``void ()(struct uart_port *port, struct ktermios *termios)`` * * Notifier for discipline change. 
See * Documentation/driver-api/tty/tty_ldisc.rst. * * Locking: caller holds tty_port->mutex * * @pm: ``void ()(struct uart_port *port, unsigned int state, * unsigned int oldstate)`` * * Perform any power management related activities on the specified @port. * @state indicates the new state (defined by enum uart_pm_state), * @oldstate indicates the previous state. * * This function should not be used to grab any resources. * * This will be called when the @port is initially opened and finally * closed, except when the @port is also the system console. This will * occur even if %CONFIG_PM is not set. * * Locking: none. * Interrupts: caller dependent. * * @type: ``const char *()(struct uart_port *port)`` * * Return a pointer to a string constant describing the specified @port, * or return %NULL, in which case the string 'unknown' is substituted. * * Locking: none. * Interrupts: caller dependent. * * @release_port: ``void ()(struct uart_port *port)`` * * Release any memory and IO region resources currently in use by the * @port. * * Locking: none. * Interrupts: caller dependent. * * @request_port: ``int ()(struct uart_port *port)`` * * Request any memory and IO region resources required by the port. If any * fail, no resources should be registered when this function returns, and * it should return -%EBUSY on failure. * * Locking: none. * Interrupts: caller dependent. * * @config_port: ``void ()(struct uart_port *port, int type)`` * * Perform any autoconfiguration steps required for the @port. @type * contains a bit mask of the required configuration. %UART_CONFIG_TYPE * indicates that the port requires detection and identification. * @port->type should be set to the type found, or %PORT_UNKNOWN if no * port was detected. * * %UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal, * which should be probed using standard kernel autoprobing techniques. * This is not necessary on platforms where ports have interrupts * internally hard wired (eg, system on a chip implementations). * * Locking: none. * Interrupts: caller dependent. * * @verify_port: ``int ()(struct uart_port *port, * struct serial_struct *serinfo)`` * * Verify the new serial port information contained within @serinfo is * suitable for this port type. * * Locking: none. * Interrupts: caller dependent. * * @ioctl: ``int ()(struct uart_port *port, unsigned int cmd, * unsigned long arg)`` * * Perform any port specific IOCTLs. IOCTL commands must be defined using * the standard numbering system found in <asm/ioctl.h>. * * Locking: none. * Interrupts: caller dependent. * * @poll_init: ``int ()(struct uart_port *port)`` * * Called by kgdb to perform the minimal hardware initialization needed to * support @poll_put_char() and @poll_get_char(). Unlike @startup(), this * should not request interrupts. * * Locking: %tty_mutex and tty_port->mutex taken. * Interrupts: n/a. * * @poll_put_char: ``void ()(struct uart_port *port, unsigned char ch)`` * * Called by kgdb to write a single character @ch directly to the serial * @port. It can and should block until there is space in the TX FIFO. * * Locking: none. * Interrupts: caller dependent. * This call must not sleep * * @poll_get_char: ``int ()(struct uart_port *port)`` * * Called by kgdb to read a single character directly from the serial * port. If data is available, it should be returned; otherwise the * function should return %NO_POLL_CHAR immediately. * * Locking: none. * Interrupts: caller dependent. 
* This call must not sleep */ struct uart_ops { unsigned int (*tx_empty)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int mctrl); unsigned int (*get_mctrl)(struct uart_port *); void (*stop_tx)(struct uart_port *); void (*start_tx)(struct uart_port *); void (*throttle)(struct uart_port *); void (*unthrottle)(struct uart_port *); void (*send_xchar)(struct uart_port *, char ch); void (*stop_rx)(struct uart_port *); void (*start_rx)(struct uart_port *); void (*enable_ms)(struct uart_port *); void (*break_ctl)(struct uart_port *, int ctl); int (*startup)(struct uart_port *); void (*shutdown)(struct uart_port *); void (*flush_buffer)(struct uart_port *); void (*set_termios)(struct uart_port *, struct ktermios *new, const struct ktermios *old); void (*set_ldisc)(struct uart_port *, struct ktermios *); void (*pm)(struct uart_port *, unsigned int state, unsigned int oldstate); const char *(*type)(struct uart_port *); void (*release_port)(struct uart_port *); int (*request_port)(struct uart_port *); void (*config_port)(struct uart_port *, int); int (*verify_port)(struct uart_port *, struct serial_struct *); int (*ioctl)(struct uart_port *, unsigned int, unsigned long); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct uart_port *); void (*poll_put_char)(struct uart_port *, unsigned char); int (*poll_get_char)(struct uart_port *); #endif }; #define NO_POLL_CHAR 0x00ff0000 #define UART_CONFIG_TYPE (1 << 0) #define UART_CONFIG_IRQ (1 << 1) struct uart_icount { __u32 cts; __u32 dsr; __u32 rng; __u32 dcd; __u32 rx; __u32 tx; __u32 frame; __u32 overrun; __u32 parity; __u32 brk; __u32 buf_overrun; }; typedef u64 __bitwise upf_t; typedef unsigned int __bitwise upstat_t; enum uart_iotype { UPIO_UNKNOWN = -1, UPIO_PORT = SERIAL_IO_PORT, /* 8b I/O port access */ UPIO_HUB6 = SERIAL_IO_HUB6, /* Hub6 ISA card */ UPIO_MEM = SERIAL_IO_MEM, /* driver-specific */ UPIO_MEM32 = SERIAL_IO_MEM32, /* 32b little endian */ UPIO_AU = SERIAL_IO_AU, /* Au1x00 and RT288x type IO */ UPIO_TSI = SERIAL_IO_TSI, /* Tsi108/109 type IO */ UPIO_MEM32BE = SERIAL_IO_MEM32BE, /* 32b big endian */ UPIO_MEM16 = SERIAL_IO_MEM16, /* 16b little endian */ }; struct uart_port { spinlock_t lock; /* port lock */ unsigned long iobase; /* in/out[bwl] */ unsigned char __iomem *membase; /* read/write[bwl] */ u32 (*serial_in)(struct uart_port *, unsigned int offset); void (*serial_out)(struct uart_port *, unsigned int offset, u32 val); void (*set_termios)(struct uart_port *, struct ktermios *new, const struct ktermios *old); void (*set_ldisc)(struct uart_port *, struct ktermios *); unsigned int (*get_mctrl)(struct uart_port *); void (*set_mctrl)(struct uart_port *, unsigned int); unsigned int (*get_divisor)(struct uart_port *, unsigned int baud, unsigned int *frac); void (*set_divisor)(struct uart_port *, unsigned int baud, unsigned int quot, unsigned int quot_frac); int (*startup)(struct uart_port *port); void (*shutdown)(struct uart_port *port); void (*throttle)(struct uart_port *port); void (*unthrottle)(struct uart_port *port); int (*handle_irq)(struct uart_port *); void (*pm)(struct uart_port *, unsigned int state, unsigned int old); void (*handle_break)(struct uart_port *); int (*rs485_config)(struct uart_port *, struct ktermios *termios, struct serial_rs485 *rs485); int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *iso7816); unsigned int ctrl_id; /* optional serial core controller id */ unsigned int port_id; /* optional serial core port id */ unsigned int irq; /* irq number */ unsigned long irqflags; /* irq 
flags */ unsigned int uartclk; /* base uart clock */ unsigned int fifosize; /* tx fifo size */ unsigned char x_char; /* xon/xoff char */ unsigned char regshift; /* reg offset shift */ unsigned char quirks; /* internal quirks */ /* internal quirks must be updated while holding port mutex */ #define UPQ_NO_TXEN_TEST BIT(0) enum uart_iotype iotype; /* io access style */ unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ struct uart_state *state; /* pointer to parent state */ struct uart_icount icount; /* statistics */ struct console *cons; /* struct console, if any */ /* flags must be updated while holding port mutex */ upf_t flags; /* * These flags must be equivalent to the flags defined in * include/uapi/linux/tty_flags.h which are the userspace definitions * assigned from the serial_struct flags in uart_set_info() * [for bit definitions in the UPF_CHANGE_MASK] * * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable * The remaining bits are serial-core specific and not modifiable by * userspace. */ #ifdef CONFIG_HAS_IOPORT #define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ ) #else #define UPF_FOURPORT 0 #endif #define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ ) #define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ ) #define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ ) #define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ ) #define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ ) #define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ ) #define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ ) #define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ ) #define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ ) #define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ ) #define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ ) #define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ ) #define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ ) #define UPF_NO_THRE_TEST ((__force upf_t) BIT_ULL(19)) /* Port has hardware-assisted h/w flow control */ #define UPF_AUTO_CTS ((__force upf_t) BIT_ULL(20)) #define UPF_AUTO_RTS ((__force upf_t) BIT_ULL(21)) #define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS)) /* Port has hardware-assisted s/w flow control */ #define UPF_SOFT_FLOW ((__force upf_t) BIT_ULL(22)) #define UPF_CONS_FLOW ((__force upf_t) BIT_ULL(23)) #define UPF_SHARE_IRQ ((__force upf_t) BIT_ULL(24)) #define UPF_EXAR_EFR ((__force upf_t) BIT_ULL(25)) #define UPF_BUG_THRE ((__force upf_t) BIT_ULL(26)) /* The exact UART type is known and should not be probed. */ #define UPF_FIXED_TYPE ((__force upf_t) BIT_ULL(27)) #define UPF_BOOT_AUTOCONF ((__force upf_t) BIT_ULL(28)) #define UPF_FIXED_PORT ((__force upf_t) BIT_ULL(29)) #define UPF_DEAD ((__force upf_t) BIT_ULL(30)) #define UPF_IOREMAP ((__force upf_t) BIT_ULL(31)) #define UPF_FULL_PROBE ((__force upf_t) BIT_ULL(32)) #define __UPF_CHANGE_MASK 0x17fff #define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK) #define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) #if __UPF_CHANGE_MASK > ASYNC_FLAGS #error Change mask not equivalent to userspace-visible bit defines #endif /* * Must hold termios_rwsem, port mutex and port lock to change; * can hold any one lock to read. 
*/ upstat_t status; #define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0)) #define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1)) #define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2)) #define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3)) #define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4)) #define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5)) bool hw_stopped; /* sw-assisted CTS flow state */ unsigned int mctrl; /* current modem ctrl settings */ unsigned int frame_time; /* frame timing in ns */ unsigned int type; /* port type */ const struct uart_ops *ops; unsigned int custom_divisor; unsigned int line; /* port index */ unsigned int minor; resource_size_t mapbase; /* for ioremap */ resource_size_t mapsize; struct device *dev; /* serial port physical parent device */ struct serial_port_device *port_dev; /* serial core port device */ unsigned long sysrq; /* sysrq timeout */ u8 sysrq_ch; /* char for sysrq */ unsigned char has_sysrq; unsigned char sysrq_seq; /* index in sysrq_toggle_seq */ unsigned char hub6; /* this should be in the 8250 driver */ unsigned char suspended; unsigned char console_reinit; const char *name; /* port name */ struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ struct serial_rs485 rs485; struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */ struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */ struct gpio_desc *rs485_rx_during_tx_gpio; /* Output GPIO that sets the state of RS485 RX during TX */ struct serial_iso7816 iso7816; void *private_data; /* generic platform data pointer */ }; /* * Only for console->device_lock()/_unlock() callbacks and internal * port lock wrapper synchronization. */ static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags) { spin_lock_irqsave(&up->lock, *flags); } /* * Only for console->device_lock()/_unlock() callbacks and internal * port lock wrapper synchronization. */ static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags) { spin_unlock_irqrestore(&up->lock, flags); } /** * uart_port_set_cons - Safely set the @cons field for a uart * @up: The uart port to set * @con: The new console to set to * * This function must be used to set @up->cons. It uses the port lock to * synchronize with the port lock wrappers in order to ensure that the console * cannot change or disappear while another context is holding the port lock. */ static inline void uart_port_set_cons(struct uart_port *up, struct console *con) { unsigned long flags; __uart_port_lock_irqsave(up, &flags); up->cons = con; __uart_port_unlock_irqrestore(up, flags); } /* Only for internal port lock wrapper usage. */ static inline bool __uart_port_using_nbcon(struct uart_port *up) { lockdep_assert_held_once(&up->lock); if (likely(!uart_console(up))) return false; /* * @up->cons is only modified under the port lock. Therefore it is * certain that it cannot disappear here. * * @up->cons->node is added/removed from the console list under the * port lock. Therefore it is certain that the registration status * cannot change here, thus @up->cons->flags can be read directly. */ if (hlist_unhashed_lockless(&up->cons->node) || !(up->cons->flags & CON_NBCON) || !up->cons->write_atomic) { return false; } return true; } /* Only for internal port lock wrapper usage. 
*/ static inline bool __uart_port_nbcon_try_acquire(struct uart_port *up) { if (!__uart_port_using_nbcon(up)) return true; return nbcon_device_try_acquire(up->cons); } /* Only for internal port lock wrapper usage. */ static inline void __uart_port_nbcon_acquire(struct uart_port *up) { if (!__uart_port_using_nbcon(up)) return; while (!nbcon_device_try_acquire(up->cons)) cpu_relax(); } /* Only for internal port lock wrapper usage. */ static inline void __uart_port_nbcon_release(struct uart_port *up) { if (!__uart_port_using_nbcon(up)) return; nbcon_device_release(up->cons); } /** * uart_port_lock - Lock the UART port * @up: Pointer to UART port structure */ static inline void uart_port_lock(struct uart_port *up) { spin_lock(&up->lock); __uart_port_nbcon_acquire(up); } /** * uart_port_lock_irq - Lock the UART port and disable interrupts * @up: Pointer to UART port structure */ static inline void uart_port_lock_irq(struct uart_port *up) { spin_lock_irq(&up->lock); __uart_port_nbcon_acquire(up); } /** * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts * @up: Pointer to UART port structure * @flags: Pointer to interrupt flags storage */ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags) { spin_lock_irqsave(&up->lock, *flags); __uart_port_nbcon_acquire(up); } /** * uart_port_trylock - Try to lock the UART port * @up: Pointer to UART port structure * * Returns: True if lock was acquired, false otherwise */ static inline bool uart_port_trylock(struct uart_port *up) { if (!spin_trylock(&up->lock)) return false; if (!__uart_port_nbcon_try_acquire(up)) { spin_unlock(&up->lock); return false; } return true; } /** * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts * @up: Pointer to UART port structure * @flags: Pointer to interrupt flags storage * * Returns: True if lock was acquired, false otherwise */ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags) { if (!spin_trylock_irqsave(&up->lock, *flags)) return false; if (!__uart_port_nbcon_try_acquire(up)) { spin_unlock_irqrestore(&up->lock, *flags); return false; } return true; } /** * uart_port_unlock - Unlock the UART port * @up: Pointer to UART port structure */ static inline void uart_port_unlock(struct uart_port *up) { __uart_port_nbcon_release(up); spin_unlock(&up->lock); } /** * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts * @up: Pointer to UART port structure */ static inline void uart_port_unlock_irq(struct uart_port *up) { __uart_port_nbcon_release(up); spin_unlock_irq(&up->lock); } /** * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts * @up: Pointer to UART port structure * @flags: The saved interrupt flags for restore */ static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags) { __uart_port_nbcon_release(up); spin_unlock_irqrestore(&up->lock, flags); } DEFINE_GUARD(uart_port_lock, struct uart_port *, uart_port_lock(_T), uart_port_unlock(_T)); DEFINE_GUARD_COND(uart_port_lock, _try, uart_port_trylock(_T)); DEFINE_GUARD(uart_port_lock_irq, struct uart_port *, uart_port_lock_irq(_T), uart_port_unlock_irq(_T)); DEFINE_LOCK_GUARD_1(uart_port_lock_irqsave, struct uart_port, uart_port_lock_irqsave(_T->lock, &_T->flags), uart_port_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags); DEFINE_LOCK_GUARD_1_COND(uart_port_lock_irqsave, _try, uart_port_trylock_irqsave(_T->lock, &_T->flags)); static inline int serial_port_in(struct uart_port 
*up, int offset) { return up->serial_in(up, offset); } static inline void serial_port_out(struct uart_port *up, int offset, int value) { up->serial_out(up, offset, value); } /** * enum uart_pm_state - power states for UARTs * @UART_PM_STATE_ON: UART is powered, up and operational * @UART_PM_STATE_OFF: UART is powered off * @UART_PM_STATE_UNDEFINED: sentinel */ enum uart_pm_state { UART_PM_STATE_ON = 0, UART_PM_STATE_OFF = 3, /* number taken from ACPI */ UART_PM_STATE_UNDEFINED, }; /* * This is the state information which is persistent across opens. */ struct uart_state { struct tty_port port; enum uart_pm_state pm_state; atomic_t refcount; wait_queue_head_t remove_wait; struct uart_port *uart_port; }; #define UART_XMIT_SIZE PAGE_SIZE /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 /** * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars * @up: uart_port structure describing the port * @chars: number of characters sent * * This function advances the tail of circular xmit buffer by the number of * @chars transmitted and handles accounting of transmitted bytes (into * @up's icount.tx). */ static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars) { struct tty_port *tport = &up->state->port; kfifo_skip_count(&tport->xmit_fifo, chars); up->icount.tx += chars; } static inline unsigned int uart_fifo_out(struct uart_port *up, unsigned char *buf, unsigned int chars) { struct tty_port *tport = &up->state->port; chars = kfifo_out(&tport->xmit_fifo, buf, chars); up->icount.tx += chars; return chars; } static inline unsigned int uart_fifo_get(struct uart_port *up, unsigned char *ch) { struct tty_port *tport = &up->state->port; unsigned int chars; chars = kfifo_get(&tport->xmit_fifo, ch); up->icount.tx += chars; return chars; } struct module; struct tty_driver; struct uart_driver { struct module *owner; const char *driver_name; const char *dev_name; int major; int minor; int nr; struct console *cons; /* * these are private; the low level driver should not * touch these; they should be initialised to NULL */ struct uart_state *state; struct tty_driver *tty_driver; }; void uart_write_wakeup(struct uart_port *port); /** * enum UART_TX_FLAGS -- flags for uart_port_tx_flags() * * @UART_TX_NOSTOP: don't call port->ops->stop_tx() on empty buffer */ enum UART_TX_FLAGS { UART_TX_NOSTOP = BIT(0), }; #define __uart_port_tx(uport, ch, flags, tx_ready, put_char, tx_done, \ for_test, for_post) \ ({ \ struct uart_port *__port = (uport); \ struct tty_port *__tport = &__port->state->port; \ unsigned int pending; \ \ for (; (for_test) && (tx_ready); (for_post), __port->icount.tx++) { \ if (__port->x_char) { \ (ch) = __port->x_char; \ (put_char); \ __port->x_char = 0; \ continue; \ } \ \ if (uart_tx_stopped(__port)) \ break; \ \ if (!kfifo_get(&__tport->xmit_fifo, &(ch))) \ break; \ \ (put_char); \ } \ \ (tx_done); \ \ pending = kfifo_len(&__tport->xmit_fifo); \ if (pending < WAKEUP_CHARS) { \ uart_write_wakeup(__port); \ \ if (!((flags) & UART_TX_NOSTOP) && pending == 0) \ __port->ops->stop_tx(__port); \ } \ \ pending; \ }) /** * uart_port_tx_limited -- transmit helper for uart_port with count limiting * @port: uart port * @ch: variable to store a character to be written to the HW * @count: a limit of characters to send * @tx_ready: can HW accept more data function * @put_char: function to write a character * @tx_done: function to call after the loop is done * * This helper transmits characters from the xmit buffer to the hardware using * 
@put_char(). It does so until @count characters are sent and while @tx_ready * evaluates to true. * * Returns: the number of characters in the xmit buffer when done. * * The expression in macro parameters shall be designed as follows: * * **tx_ready:** should evaluate to true if the HW can accept more data to * be sent. This parameter can be %true, which means the HW is always ready. * * **put_char:** shall write @ch to the device of @port. * * **tx_done:** when the write loop is done, this can perform arbitrary * action before potential invocation of ops->stop_tx() happens. If the * driver does not need to do anything, use e.g. ({}). * * For all of them, @port->lock is held, interrupts are locally disabled and * the expressions must not sleep. */ #define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \ unsigned int __count = (count); \ __uart_port_tx(port, ch, 0, tx_ready, put_char, tx_done, __count, \ __count--); \ }) /** * uart_port_tx_limited_flags -- transmit helper for uart_port with count limiting with flags * @port: uart port * @ch: variable to store a character to be written to the HW * @flags: %UART_TX_NOSTOP or similar * @count: a limit of characters to send * @tx_ready: can HW accept more data function * @put_char: function to write a character * @tx_done: function to call after the loop is done * * See uart_port_tx_limited() for more details. */ #define uart_port_tx_limited_flags(port, ch, flags, count, tx_ready, put_char, tx_done) ({ \ unsigned int __count = (count); \ __uart_port_tx(port, ch, flags, tx_ready, put_char, tx_done, __count, \ __count--); \ }) /** * uart_port_tx -- transmit helper for uart_port * @port: uart port * @ch: variable to store a character to be written to the HW * @tx_ready: can HW accept more data function * @put_char: function to write a character * * See uart_port_tx_limited() for more details. */ #define uart_port_tx(port, ch, tx_ready, put_char) \ __uart_port_tx(port, ch, 0, tx_ready, put_char, ({}), true, ({})) /** * uart_port_tx_flags -- transmit helper for uart_port with flags * @port: uart port * @ch: variable to store a character to be written to the HW * @flags: %UART_TX_NOSTOP or similar * @tx_ready: can HW accept more data function * @put_char: function to write a character * * See uart_port_tx_limited() for more details. */ #define uart_port_tx_flags(port, ch, flags, tx_ready, put_char) \ __uart_port_tx(port, ch, flags, tx_ready, put_char, ({}), true, ({})) /* * Baud rate helpers. */ void uart_update_timeout(struct uart_port *port, unsigned int cflag, unsigned int baud); unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, const struct ktermios *old, unsigned int min, unsigned int max); unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); /* * Calculates FIFO drain time. */ static inline unsigned long uart_fifo_timeout(struct uart_port *port) { u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize; /* Add .02 seconds of slop */ fifo_timeout += 20 * NSEC_PER_MSEC; return max(nsecs_to_jiffies(fifo_timeout), 1UL); } /* Base timer interval for polling */ static inline unsigned long uart_poll_timeout(struct uart_port *port) { unsigned long timeout = uart_fifo_timeout(port); return timeout > 6 ? (timeout / 2 - 2) : 1; } /* * Console helpers. 
*/ struct earlycon_device { struct console *con; struct uart_port port; char options[32]; /* e.g., 115200n8 */ unsigned int baud; }; struct earlycon_id { char name[15]; char name_term; /* In case compiler didn't '\0' term name */ char compatible[128]; int (*setup)(struct earlycon_device *, const char *options); }; extern const struct earlycon_id __earlycon_table[]; extern const struct earlycon_id __earlycon_table_end[]; #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) #define EARLYCON_USED_OR_UNUSED __used #else #define EARLYCON_USED_OR_UNUSED __maybe_unused #endif #define OF_EARLYCON_DECLARE(_name, compat, fn) \ static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ EARLYCON_USED_OR_UNUSED __section("__earlycon_table") \ __aligned(__alignof__(struct earlycon_id)) \ = { .name = __stringify(_name), \ .compatible = compat, \ .setup = fn } #define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) int of_setup_earlycon(const struct earlycon_id *match, unsigned long node, const char *options); #ifdef CONFIG_SERIAL_EARLYCON extern bool earlycon_acpi_spcr_enable __initdata; int setup_earlycon(char *buf); #else static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; static inline int setup_earlycon(char *buf) { return 0; } #endif /* Variant of uart_console_registered() when the console_list_lock is held. */ static inline bool uart_console_registered_locked(struct uart_port *port) { return uart_console(port) && console_is_registered_locked(port->cons); } static inline bool uart_console_registered(struct uart_port *port) { return uart_console(port) && console_is_registered(port->cons); } int uart_parse_earlycon(char *p, enum uart_iotype *iotype, resource_size_t *addr, char **options); void uart_parse_options(const char *options, int *baud, int *parity, int *bits, int *flow); int uart_set_options(struct uart_port *port, struct console *co, int baud, int parity, int bits, int flow); struct tty_driver *uart_console_device(struct console *co, int *index); void uart_console_write(struct uart_port *port, const char *s, unsigned int count, void (*putchar)(struct uart_port *, unsigned char)); /* * Port/driver registration/removal */ int uart_register_driver(struct uart_driver *uart); void uart_unregister_driver(struct uart_driver *uart); int uart_add_one_port(struct uart_driver *reg, struct uart_port *port); void uart_remove_one_port(struct uart_driver *reg, struct uart_port *port); int uart_read_port_properties(struct uart_port *port); int uart_read_and_validate_port_properties(struct uart_port *port); bool uart_match_port(const struct uart_port *port1, const struct uart_port *port2); /* * Power Management */ int uart_suspend_port(struct uart_driver *reg, struct uart_port *port); int uart_resume_port(struct uart_driver *reg, struct uart_port *port); static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; if ((tty && tty->flow.stopped) || port->hw_stopped) return 1; return 0; } static inline bool uart_cts_enabled(struct uart_port *uport) { return !!(uport->status & UPSTAT_CTS_ENABLE); } static inline bool uart_softcts_mode(struct uart_port *uport) { upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS; return ((uport->status & mask) == UPSTAT_CTS_ENABLE); } /* * The following are helper functions for the low level drivers. 
*/ void uart_handle_dcd_change(struct uart_port *uport, bool active); void uart_handle_cts_change(struct uart_port *uport, bool active); void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, u8 ch, u8 flag); void uart_xchar_out(struct uart_port *uport, int offset); #ifdef CONFIG_MAGIC_SYSRQ_SERIAL #define SYSRQ_TIMEOUT (HZ * 5) bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch); static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch) { if (!port->sysrq) return 0; if (ch && time_before(jiffies, port->sysrq)) { if (sysrq_mask()) { handle_sysrq(ch); port->sysrq = 0; return 1; } if (uart_try_toggle_sysrq(port, ch)) return 1; } port->sysrq = 0; return 0; } static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch) { if (!port->sysrq) return 0; if (ch && time_before(jiffies, port->sysrq)) { if (sysrq_mask()) { port->sysrq_ch = ch; port->sysrq = 0; return 1; } if (uart_try_toggle_sysrq(port, ch)) return 1; } port->sysrq = 0; return 0; } static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { u8 sysrq_ch; if (!port->has_sysrq) { uart_port_unlock(port); return; } sysrq_ch = port->sysrq_ch; port->sysrq_ch = 0; uart_port_unlock(port); if (sysrq_ch) handle_sysrq(sysrq_ch); } static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, unsigned long flags) { u8 sysrq_ch; if (!port->has_sysrq) { uart_port_unlock_irqrestore(port, flags); return; } sysrq_ch = port->sysrq_ch; port->sysrq_ch = 0; uart_port_unlock_irqrestore(port, flags); if (sysrq_ch) handle_sysrq(sysrq_ch); } #else /* CONFIG_MAGIC_SYSRQ_SERIAL */ static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch) { return 0; } static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch) { return 0; } static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { uart_port_unlock(port); } static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, unsigned long flags) { uart_port_unlock_irqrestore(port, flags); } #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ /* * We do the SysRQ and SAK checking like this... */ static inline int uart_handle_break(struct uart_port *port) { struct uart_state *state = port->state; if (port->handle_break) port->handle_break(port); #ifdef CONFIG_MAGIC_SYSRQ_SERIAL if (port->has_sysrq && uart_console(port)) { if (!port->sysrq) { port->sysrq = jiffies + SYSRQ_TIMEOUT; return 1; } port->sysrq = 0; } #endif if (port->flags & UPF_SAK) do_SAK(state->port.tty); return 0; } /* * UART_ENABLE_MS - determine if port should enable modem status irqs */ #define UART_ENABLE_MS(port,cflag) ((port)->flags & UPF_HARDPPS_CD || \ (cflag) & CRTSCTS || \ !((cflag) & CLOCAL)) int uart_get_rs485_mode(struct uart_port *port); #endif /* LINUX_SERIAL_CORE_H */
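Taken together, the port-lock wrappers, the sysrq helpers and the uart_port_tx() family above are the building blocks of a low-level driver's interrupt handler. The following is a minimal sketch of such a handler for a hypothetical device; the FOO_* register offsets and masks and the foo_read()/foo_write() accessors are invented for illustration and are not part of serial_core.

/* Assumed includes: <linux/serial_core.h>, <linux/tty_flip.h>,
 * <linux/interrupt.h>, <linux/io.h>, <linux/bits.h>. */

/* Hypothetical register layout, for illustration only. */
#define FOO_STAT		0x00
#define FOO_RX			0x04
#define FOO_TX			0x08
#define FOO_STAT_RX_RDY		BIT(0)
#define FOO_STAT_TX_RDY		BIT(1)

static u32 foo_read(struct uart_port *port, unsigned int off)
{
	return readl(port->membase + off);
}

static void foo_write(struct uart_port *port, u32 val, unsigned int off)
{
	writel(val, port->membase + off);
}

static irqreturn_t foo_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned int stat;
	u8 ch;

	/* Take the port lock through the wrapper so nbcon consoles stay safe. */
	uart_port_lock(port);

	/* Receive path: push characters into the tty layer, honouring sysrq. */
	while ((stat = foo_read(port, FOO_STAT)) & FOO_STAT_RX_RDY) {
		ch = foo_read(port, FOO_RX);
		port->icount.rx++;

		if (uart_prepare_sysrq_char(port, ch))
			continue;

		uart_insert_char(port, stat, 0, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(&port->state->port);

	/* Transmit path: drain the xmit kfifo while the hardware has room. */
	if (stat & FOO_STAT_TX_RDY)
		uart_port_tx(port, ch,
			     foo_read(port, FOO_STAT) & FOO_STAT_TX_RDY,
			     foo_write(port, ch, FOO_TX));

	/* Drop the lock and, if a sysrq character was queued, handle it now. */
	uart_unlock_and_check_sysrq(port);

	return IRQ_HANDLED;
}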
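The earlycon declarations earlier in the header (struct earlycon_device, OF_EARLYCON_DECLARE() and uart_console_write()) are what boot-time console drivers register through. Below is a minimal sketch of such a registration; the "acme,foo-uart" compatible string, the FOO_TX offset and all foo_* names are invented for illustration.

/* Assumed includes: <linux/serial_core.h>, <linux/io.h>, <linux/init.h>. */

/* Hypothetical TX register offset, for illustration only. */
#define FOO_TX 0x08

static void foo_early_putc(struct uart_port *port, unsigned char c)
{
	writel(c, port->membase + FOO_TX);
}

static void foo_early_write(struct console *con, const char *s, unsigned int n)
{
	/* Earlycon stores its earlycon_device in con->data. */
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, foo_early_putc);
}

static int __init foo_early_console_setup(struct earlycon_device *device,
					  const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = foo_early_write;
	return 0;
}
OF_EARLYCON_DECLARE(foo_uart, "acme,foo-uart", foo_early_console_setup);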
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* md.h : kernel internal structure of the Linux MD driver Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman */ #ifndef _MD_MD_H #define _MD_MD_H #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/badblocks.h> #include <linux/kobject.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/timer.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/raid/md_u.h> #include <trace/events/block.h> #define MaxSector (~(sector_t)0) /* * Number of guaranteed raid bios in case of extreme VM load: */ #define NR_RAID_BIOS 256 enum md_submodule_type { MD_PERSONALITY = 0, MD_CLUSTER, MD_BITMAP, }; enum md_submodule_id { ID_LINEAR = LEVEL_LINEAR, ID_RAID0 = 0, ID_RAID1 = 1, ID_RAID4 = 4, ID_RAID5 = 5, ID_RAID6 = 6, ID_RAID10 = 10, ID_CLUSTER, ID_BITMAP, ID_LLBITMAP, ID_BITMAP_NONE, }; struct md_submodule_head { enum md_submodule_type type; enum md_submodule_id id; const char *name; struct module *owner; }; /* * These flags should really be called "NO_RETRY" rather than * "FAILFAST" because they don't make any promise about time lapse, * only about the number of retries, which will be zero. * REQ_FAILFAST_DRIVER is not included because * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.") * seems to suggest that the errors it avoids retrying should usually * be retried. */ #define MD_FAILFAST (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT) /* Status of sync thread. */ enum sync_action { /* * Represented by MD_RECOVERY_SYNC, started when: * 1) after assembly, syncing data from the first rdev to the other copies; this * must be done before other sync actions and will only execute * once; * 2) the array is resized (note that this is not a reshape), syncing data for * the new range; */ ACTION_RESYNC, /* * Represented by MD_RECOVERY_RECOVER, started when: * 1) for a new replacement, syncing data based on the replaced rdev or * available copies from other rdevs; * 2) for a new member disk while the array is degraded, syncing data from * other rdevs; * 3) reassembling after power failure or re-adding a hot-removed rdev, syncing * data from the first rdev to the other copies based on the bitmap; */ ACTION_RECOVER, /* * Represented by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED | * MD_RECOVERY_CHECK, started when the user echoes "check" to the sysfs api * sync_action; used to check whether data copies from different rdevs are * the same.
The number of mismatch sectors will be exported to user * by sysfs api mismatch_cnt; */ ACTION_CHECK, /* * Represent by MD_RECOVERY_SYNC | MD_RECOVERY_REQUESTED, start when * user echo "repair" to sysfs api sync_action, usually paired with * ACTION_CHECK, used to force syncing data once user found that there * are inconsistent data, */ ACTION_REPAIR, /* * Represent by MD_RECOVERY_RESHAPE, start when new member disk is added * to the conf, notice that this is different from spares or * replacement; */ ACTION_RESHAPE, /* * Represent by MD_RECOVERY_FROZEN, can be set by sysfs api sync_action * or internal usage like setting the array read-only, will forbid above * actions. */ ACTION_FROZEN, /* * All above actions don't match. */ ACTION_IDLE, NR_SYNC_ACTIONS, }; /* * The struct embedded in rdev is used to serialize IO. */ struct serial_in_rdev { struct rb_root_cached serial_rb; spinlock_t serial_lock; wait_queue_head_t serial_io_wait; }; /* * MD's 'extended' device */ struct md_rdev { struct list_head same_set; /* RAID devices within the same set */ sector_t sectors; /* Device size (in 512bytes sectors) */ struct mddev *mddev; /* RAID array if running */ unsigned long last_events; /* IO event timestamp */ /* * If meta_bdev is non-NULL, it means that a separate device is * being used to store the metadata (superblock/bitmap) which * would otherwise be contained on the same device as the data (bdev). */ struct block_device *meta_bdev; struct block_device *bdev; /* block device handle */ struct file *bdev_file; /* Handle from open for bdev */ struct page *sb_page, *bb_page; int sb_loaded; __u64 sb_events; sector_t data_offset; /* start of data in array */ sector_t new_data_offset;/* only relevant while reshaping */ sector_t sb_start; /* offset of the super block (in 512byte sectors) */ int sb_size; /* bytes in the superblock */ int preferred_minor; /* autorun support */ struct kobject kobj; /* A device can be in one of three states based on two flags: * Not working: faulty==1 in_sync==0 * Fully working: faulty==0 in_sync==1 * Working, but not * in sync with array * faulty==0 in_sync==0 * * It can never have faulty==1, in_sync==1 * This reduces the burden of testing multiple flags in many cases */ unsigned long flags; /* bit set of 'enum flag_bits' bits. */ wait_queue_head_t blocked_wait; int desc_nr; /* descriptor index in the superblock */ int raid_disk; /* role of device in array */ int new_raid_disk; /* role that the device will have in * the array after a level-change completes. */ int saved_raid_disk; /* role that device used to have in the * array and could again if we did a partial * resync from the bitmap */ union { sector_t recovery_offset;/* If this device has been partially * recovered, this is where we were * up to. */ sector_t journal_tail; /* If this device is a journal device, * this is the journal tail (journal * recovery start point) */ }; atomic_t nr_pending; /* number of pending requests. * only maintained for arrays that * support hot removal */ atomic_t read_errors; /* number of consecutive read errors that * we have tried to ignore. */ time64_t last_read_error; /* monotonic time since our * last read error */ atomic_t corrected_errors; /* number of corrected read errors, * for reporting to userspace and storing * in superblock. 
*/ struct serial_in_rdev *serial; /* used for raid1 io serialization */ struct kernfs_node *sysfs_state; /* handle for 'state' * sysfs entry */ /* handle for 'unacknowledged_bad_blocks' sysfs dentry */ struct kernfs_node *sysfs_unack_badblocks; /* handle for 'bad_blocks' sysfs dentry */ struct kernfs_node *sysfs_badblocks; struct badblocks badblocks; struct { short offset; /* Offset from superblock to start of PPL. * Not used by external metadata. */ unsigned int size; /* Size in sectors of the PPL space */ sector_t sector; /* First sector of the PPL space */ } ppl; }; enum flag_bits { Faulty, /* device is known to have a fault */ In_sync, /* device is in_sync with rest of array */ Bitmap_sync, /* ..actually, not quite In_sync. Need a * bitmap-based recovery to get fully in sync. * The bit is only meaningful before device * has been passed to pers->hot_add_disk. */ WriteMostly, /* Avoid reading if at all possible */ AutoDetected, /* added by auto-detect */ Blocked, /* An error occurred but has not yet * been acknowledged by the metadata * handler, so don't allow writes * until it is cleared */ WriteErrorSeen, /* A write error has been seen on this * device */ FaultRecorded, /* Intermediate state for clearing * Blocked. The Fault is/will-be * recorded in the metadata, but that * metadata hasn't been stored safely * on disk yet. */ BlockedBadBlocks, /* A writer is blocked because they * found an unacknowledged bad-block. * This can safely be cleared at any * time, and the writer will re-check. * It may be set at any time, and at * worst the writer will timeout and * re-check. So setting it as * accurately as possible is good, but * not absolutely critical. */ WantReplacement, /* This device is a candidate to be * hot-replaced, either because it has * reported some faults, or because * of explicit request. */ Replacement, /* This device is a replacement for * a want_replacement device with same * raid_disk number. */ Candidate, /* For clustered environments only: * This device is seen locally but not * by the whole cluster */ Journal, /* This device is used as journal for * raid-5/6. * Usually, this device should be faster * than other devices in the array */ ClusterRemove, ExternalBbl, /* External metadata provides bad * block management for a disk */ FailFast, /* Minimal retries should be attempted on * this device, so use REQ_FAILFAST_DEV. * Also don't try to repair failed reads. * It is expects that no bad block log * is present. */ LastDev, /* Seems to be the last working dev as * it didn't fail, so don't use FailFast * any more for metadata */ CollisionCheck, /* * check if there is collision between raid1 * serial bios. */ Nonrot, /* non-rotational device (SSD) */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, sector_t sectors, sector_t *first_bad, sector_t *bad_sectors) { if (unlikely(rdev->badblocks.count)) { int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s, sectors, first_bad, bad_sectors); if (rv) *first_bad -= rdev->data_offset; return rv; } return 0; } static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s, int sectors) { sector_t first_bad; sector_t bad_sectors; return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors); } extern bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new); extern void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new); struct md_cluster_info; struct md_cluster_operations; /** * enum mddev_flags - md device flags. 
* @MD_ARRAY_FIRST_USE: First use of array, needs initialization. * @MD_CLOSING: If set, we are closing the array, do not open it then. * @MD_JOURNAL_CLEAN: A raid with journal is already clean. * @MD_HAS_JOURNAL: The raid array has journal feature set. * @MD_CLUSTER_RESYNC_LOCKED: cluster raid only, which means this node has already taken the * resync lock and needs to release it. * @MD_FAILFAST_SUPPORTED: Using MD_FAILFAST on metadata writes is supported as * calls to md_error() will never cause the array to * become failed. * @MD_HAS_PPL: The raid array has PPL feature set. * @MD_HAS_MULTIPLE_PPLS: The raid array has multiple PPLs feature set. * @MD_NOT_READY: do_md_run() is active, so 'array_state' must not report that the * array is ready yet. * @MD_BROKEN: This is used to stop writes and mark array as failed. * @MD_DELETED: This device is being deleted. * @MD_HAS_SUPERBLOCK: There is a persistent superblock in the member disks. * @MD_FAILLAST_DEV: Allow the last rdev to be removed. * @MD_SERIALIZE_POLICY: Enforce that write IO is not reordered; only used by raid1. * * change UNSUPPORTED_MDDEV_FLAGS for each array type if a new flag is added */ enum mddev_flags { MD_ARRAY_FIRST_USE, MD_CLOSING, MD_JOURNAL_CLEAN, MD_HAS_JOURNAL, MD_CLUSTER_RESYNC_LOCKED, MD_FAILFAST_SUPPORTED, MD_HAS_PPL, MD_HAS_MULTIPLE_PPLS, MD_NOT_READY, MD_BROKEN, MD_DO_DELETE, MD_DELETED, MD_HAS_SUPERBLOCK, MD_FAILLAST_DEV, MD_SERIALIZE_POLICY, }; enum mddev_sb_flags { MD_SB_CHANGE_DEVS, /* Some device status has changed */ MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */ MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */ MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */ }; #define NR_SERIAL_INFOS 8 /* record current range of serialize IOs */ struct serial_info { struct rb_node node; sector_t start; /* start sector of rb node */ sector_t last; /* end sector of rb node */ sector_t _subtree_last; /* highest sector in subtree of rb node */ }; /* * mddev->curr_resync stores the current sector of the resync but * also has some overloaded values.
*/ enum { /* No resync in progress */ MD_RESYNC_NONE = 0, /* Yielded to allow another conflicting resync to commence */ MD_RESYNC_YIELDED = 1, /* Delayed to check that there is no conflict with another sync */ MD_RESYNC_DELAYED = 2, /* Any value greater than or equal to this is in an active resync */ MD_RESYNC_ACTIVE = 3, }; struct mddev { void *private; struct md_personality *pers; dev_t unit; int md_minor; struct list_head disks; unsigned long flags; unsigned long sb_flags; int suspended; struct mutex suspend_mutex; struct percpu_ref active_io; int ro; int sysfs_active; /* set when sysfs deletes * are happening, so run/ * takeover/stop are not safe */ struct gendisk *gendisk; /* mdraid gendisk */ struct gendisk *dm_gendisk; /* dm-raid gendisk */ struct kobject kobj; int hold_active; #define UNTIL_IOCTL 1 #define UNTIL_STOP 2 /* Superblock information */ int major_version, minor_version, patch_version; int persistent; int external; /* metadata is * managed externally */ char metadata_type[17]; /* externally set*/ int chunk_sectors; time64_t ctime, utime; int level, layout; char clevel[16]; int raid_disks; int max_disks; sector_t dev_sectors; /* used size of * component devices */ sector_t array_sectors; /* exported array size */ int external_size; /* size managed * externally */ unsigned int logical_block_size; __u64 events; /* If the last 'event' was simply a clean->dirty transition, and * we didn't write it to the spares, then it is safe and simple * to just decrement the event count on a dirty->clean transition. * So we record that possibility here. */ int can_decrease_events; char uuid[16]; /* If the array is being reshaped, we need to record the * new shape and an indication of where we are up to. * This is written to the superblock. * If reshape_position is MaxSector, then no reshape is happening (yet). */ sector_t reshape_position; int delta_disks, new_level, new_layout; int new_chunk_sectors; int reshape_backwards; struct md_thread __rcu *thread; /* management thread */ struct md_thread __rcu *sync_thread; /* doing resync or reconstruct */ /* * Set when a sync operation is started. It holds this value even * when the sync thread is "frozen" (interrupted) or "idle" (stopped * or finished). It is overwritten when a new sync operation is begun. */ enum sync_action last_sync_action; sector_t curr_resync; /* last block scheduled */ /* As resync requests can complete out of order, we cannot easily track * how much resync has been completed. So we occasionally pause until * everything completes, then set curr_resync_completed to curr_resync. * As such it may be well behind the real resync mark, but it is a value * we are certain of. 
*/ sector_t curr_resync_completed; unsigned long resync_mark; /* a recent timestamp */ sector_t resync_mark_cnt;/* blocks written at resync_mark */ sector_t curr_mark_cnt; /* blocks scheduled now */ sector_t resync_max_sectors; /* may be set by personality */ atomic64_t resync_mismatches; /* count of sectors where * parity/replica mismatch found */ /* allow user-space to request suspension of IO to regions of the array */ sector_t suspend_lo; sector_t suspend_hi; /* if zero, use the system-wide default */ int sync_speed_min; int sync_speed_max; int sync_io_depth; /* resync even though the same disks are shared among md-devices */ int parallel_resync; int ok_start_degraded; unsigned long recovery; int in_sync; /* know to not need resync */ /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so * that we are never stopping an array while it is open. * 'reconfig_mutex' protects all other reconfiguration. * These locks are separate due to conflicting interactions * with disk->open_mutex. * Lock ordering is: * reconfig_mutex -> disk->open_mutex * disk->open_mutex -> open_mutex: e.g. __blkdev_get -> md_open */ struct mutex open_mutex; struct mutex reconfig_mutex; atomic_t active; /* general refcount */ atomic_t openers; /* number of active opens */ int changed; /* True if we might need to * reread partition info */ int degraded; /* whether md should consider * adding a spare */ unsigned long normal_io_events; /* IO event timestamp */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; sector_t resync_offset; sector_t resync_min; /* user requested sync * starts here */ sector_t resync_max; /* resync should pause * when it gets here */ struct kernfs_node *sysfs_state; /* handle for 'array_state' * file in sysfs. */ struct kernfs_node *sysfs_action; /* handle for 'sync_action' */ struct kernfs_node *sysfs_completed; /*handle for 'sync_completed' */ struct kernfs_node *sysfs_degraded; /*handle for 'degraded' */ struct kernfs_node *sysfs_level; /*handle for 'level' */ /* used for delayed sysfs removal */ struct work_struct del_work; /* used for register new sync thread */ struct work_struct sync_work; /* "lock" protects: * flush_bio transition from NULL to !NULL * rdev superblocks, events * clearing MD_CHANGE_* * in_sync - and related safemode and MD_CHANGE changes * pers (also protected by reconfig_mutex and pending IO). * clearing ->bitmap * clearing ->bitmap_info.file * changing ->resync_{min,max} * setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max}) */ spinlock_t lock; wait_queue_head_t sb_wait; /* for waiting on superblock updates */ atomic_t pending_writes; /* number of active superblock writes */ unsigned int safemode; /* if set, update "clean" superblock * when no writes pending. */ unsigned int safemode_delay; struct timer_list safemode_timer; struct percpu_ref writes_pending; int sync_checkers; /* # of threads checking writes_pending */ enum md_submodule_id bitmap_id; void *bitmap; /* the bitmap for the device */ struct bitmap_operations *bitmap_ops; struct { struct file *file; /* the bitmap file */ loff_t offset; /* offset from superblock of * start of bitmap. May be * negative, but not '0' * For external metadata, offset * from start of device. */ unsigned long space; /* space available at this offset */ loff_t default_offset; /* this is the offset to use when * hot-adding a bitmap. It should * eventually be settable by sysfs. 
*/ unsigned long default_space; /* space available at * default offset */ struct mutex mutex; unsigned long chunksize; unsigned long daemon_sleep; /* how many jiffies between updates? */ unsigned long max_write_behind; /* write-behind mode */ int external; int nodes; /* Maximum number of nodes in the cluster */ char cluster_name[64]; /* Name of the cluster */ } bitmap_info; atomic_t max_corr_read_errors; /* max read retries */ struct list_head all_mddevs; const struct attribute_group *to_remove; struct bio_set bio_set; struct bio_set sync_set; /* for sync operations like * metadata and bitmap writes */ struct bio_set io_clone_set; struct work_struct event_work; /* used by dm to report failure event */ mempool_t *serial_info_pool; void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); struct md_cluster_info *cluster_info; struct md_cluster_operations *cluster_ops; unsigned int good_device_nr; /* good device num within cluster raid */ unsigned int noio_flag; /* for memalloc scope API */ /* * Temporarily store rdev that will be finally removed when * reconfig_mutex is unlocked, protected by reconfig_mutex. */ struct list_head deleting; /* The sequence number for sync thread */ atomic_t sync_seq; }; enum recovery_flags { /* flags for sync thread running status */ /* * set when one of sync action is set and new sync thread need to be * registered, or just add/remove spares from conf. */ MD_RECOVERY_NEEDED, /* sync thread is running, or about to be started */ MD_RECOVERY_RUNNING, /* sync thread needs to be aborted for some reason */ MD_RECOVERY_INTR, /* sync thread is done and is waiting to be unregistered */ MD_RECOVERY_DONE, /* running sync thread must abort immediately, and not restart */ MD_RECOVERY_FROZEN, /* waiting for pers->start() to finish */ MD_RECOVERY_WAIT, /* flags determines sync action, see details in enum sync_action */ /* if just this flag is set, action is resync. */ MD_RECOVERY_SYNC, /* * paired with MD_RECOVERY_SYNC, if MD_RECOVERY_CHECK is not set, * action is repair, means user requested resync. */ MD_RECOVERY_REQUESTED, /* * paired with MD_RECOVERY_SYNC and MD_RECOVERY_REQUESTED, action is * check. */ MD_RECOVERY_CHECK, /* recovery, or need to try it */ MD_RECOVERY_RECOVER, /* reshape */ MD_RECOVERY_RESHAPE, /* remote node is running resync thread */ MD_RESYNCING_REMOTE, /* raid456 lazy initial recover */ MD_RECOVERY_LAZY_RECOVER, }; enum md_ro_state { MD_RDWR, MD_RDONLY, MD_AUTO_READ, MD_MAX_STATE }; static inline bool md_is_rdwr(struct mddev *mddev) { return (mddev->ro == MD_RDWR); } static inline bool reshape_interrupted(struct mddev *mddev) { /* reshape never start */ if (mddev->reshape_position == MaxSector) return false; /* interrupted */ if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return true; /* running reshape will be interrupted soon. */ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) || test_bit(MD_RECOVERY_INTR, &mddev->recovery) || test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) return true; return false; } static inline int __must_check mddev_lock(struct mddev *mddev) { int ret; ret = mutex_lock_interruptible(&mddev->reconfig_mutex); /* MD_DELETED is set in do_md_stop with reconfig_mutex. * So check it here. */ if (!ret && test_bit(MD_DELETED, &mddev->flags)) { ret = -ENODEV; mutex_unlock(&mddev->reconfig_mutex); } return ret; } /* Sometimes we need to take the lock in a situation where * failure due to interrupts is not acceptable. * It doesn't need to check MD_DELETED here, the owner which * holds the lock here can't be stopped. 
And all paths can't * call this function after do_md_stop. */ static inline void mddev_lock_nointr(struct mddev *mddev) { mutex_lock(&mddev->reconfig_mutex); } static inline int mddev_trylock(struct mddev *mddev) { int ret; ret = mutex_trylock(&mddev->reconfig_mutex); if (ret && test_bit(MD_DELETED, &mddev->flags)) { ret = 0; mutex_unlock(&mddev->reconfig_mutex); } return ret; } extern void mddev_unlock(struct mddev *mddev); struct md_personality { struct md_submodule_head head; bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio); /* * start up works that do NOT require md_thread. tasks that * requires md_thread should go into start() */ int (*run)(struct mddev *mddev); /* start up works that require md threads */ int (*start)(struct mddev *mddev); void (*free)(struct mddev *mddev, void *priv); void (*status)(struct seq_file *seq, struct mddev *mddev); /* error_handler must set ->faulty and clear ->in_sync * if appropriate, and should abort recovery if needed */ void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev); int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev); int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev); int (*spare_active) (struct mddev *mddev); sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, sector_t max_sector, int *skipped); int (*resize) (struct mddev *mddev, sector_t sectors); sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks); int (*check_reshape) (struct mddev *mddev); int (*start_reshape) (struct mddev *mddev); void (*finish_reshape) (struct mddev *mddev); void (*update_reshape_pos) (struct mddev *mddev); void (*prepare_suspend) (struct mddev *mddev); /* quiesce suspends or resumes internal processing. * 1 - stop new actions and wait for action io to complete * 0 - return to normal behaviour */ void (*quiesce) (struct mddev *mddev, int quiesce); /* takeover is used to transition an array from one * personality to another. The new personality must be able * to handle the data in the current layout. * e.g. 2drive raid1 -> 2drive raid5 * ndrive raid5 -> degraded n+1drive raid6 with special layout * If the takeover succeeds, a new 'private' structure is returned. * This needs to be installed and then ->run used to activate the * array. */ void *(*takeover) (struct mddev *mddev); /* Changes the consistency policy of an active array. */ int (*change_consistency_policy)(struct mddev *mddev, const char *buf); /* convert io ranges from array to bitmap */ void (*bitmap_sector)(struct mddev *mddev, sector_t *offset, unsigned long *sectors); }; struct md_sysfs_entry { struct attribute attr; ssize_t (*show)(struct mddev *, char *); ssize_t (*store)(struct mddev *, const char *, size_t); }; static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name) { if (sd) return sysfs_get_dirent(sd, name); return sd; } static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd) { if (sd) sysfs_notify_dirent(sd); } static inline char * mdname (struct mddev * mddev) { return mddev->gendisk ? 
mddev->gendisk->disk_name : "mdX"; } static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) { char nm[20]; if (!test_bit(Replacement, &rdev->flags) && !test_bit(Journal, &rdev->flags) && mddev->kobj.sd) { sprintf(nm, "rd%d", rdev->raid_disk); return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); } else return 0; } static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) { char nm[20]; if (!test_bit(Replacement, &rdev->flags) && !test_bit(Journal, &rdev->flags) && mddev->kobj.sd) { sprintf(nm, "rd%d", rdev->raid_disk); sysfs_remove_link(&mddev->kobj, nm); } } /* * iterates through some rdev ringlist. It's safe to remove the * current 'rdev'. Dont touch 'tmp' though. */ #define rdev_for_each_list(rdev, tmp, head) \ list_for_each_entry_safe(rdev, tmp, head, same_set) /* * iterates through the 'same array disks' ringlist */ #define rdev_for_each(rdev, mddev) \ list_for_each_entry(rdev, &((mddev)->disks), same_set) #define rdev_for_each_safe(rdev, tmp, mddev) \ list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) #define rdev_for_each_rcu(rdev, mddev) \ list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) struct md_thread { void (*run) (struct md_thread *thread); struct mddev *mddev; wait_queue_head_t wqueue; unsigned long flags; struct task_struct *tsk; unsigned long timeout; void *private; }; struct md_io_clone { struct mddev *mddev; struct bio *orig_bio; unsigned long start_time; sector_t offset; unsigned long sectors; enum stat_group rw; struct bio bio_clone; }; #define THREAD_WAKEUP 0 #define md_wakeup_thread(thread) do { \ rcu_read_lock(); \ __md_wakeup_thread(thread); \ rcu_read_unlock(); \ } while (0) static inline void safe_put_page(struct page *p) { if (p) put_page(p); } int register_md_submodule(struct md_submodule_head *msh); void unregister_md_submodule(struct md_submodule_head *msh); extern struct md_thread *md_register_thread( void (*run)(struct md_thread *thread), struct mddev *mddev, const char *name); extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp); extern void __md_wakeup_thread(struct md_thread __rcu *thread); extern void md_check_recovery(struct mddev *mddev); extern void md_reap_sync_thread(struct mddev *mddev); extern enum sync_action md_sync_action(struct mddev *mddev); extern enum sync_action md_sync_action_by_name(const char *page); extern const char *md_sync_action_name(enum sync_action action); extern void md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); extern void md_write_end(struct mddev *mddev); extern void md_done_sync(struct mddev *mddev, int blocks); extern void md_sync_error(struct mddev *mddev); extern void md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_finish_reshape(struct mddev *mddev); void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio, sector_t start, sector_t size); void md_account_bio(struct mddev *mddev, struct bio **bio); void md_free_cloned_bio(struct bio *bio); extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page, unsigned int offset); extern int md_super_wait(struct mddev *mddev); extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct page *page, blk_opf_t opf, bool metadata_op); extern void md_do_sync(struct md_thread *thread); extern void md_new_event(void); 
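mddev_lock()/mddev_unlock() and the rdev_for_each() iterators above are typically combined whenever array-wide state is inspected or reconfigured. The helper below is an illustrative sketch, not part of md.h; the function name and the counting logic are made up.

/* Illustrative helper: count member devices that are fully in sync.
 * Assumes the declarations above (mddev_lock(), rdev_for_each(), flag bits). */
static int foo_count_in_sync_rdevs(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int in_sync = 0;
	int err;

	/* mddev_lock() can fail on a signal or when the array is being deleted. */
	err = mddev_lock(mddev);
	if (err)
		return err;

	rdev_for_each(rdev, mddev)
		if (!test_bit(Faulty, &rdev->flags) &&
		    test_bit(In_sync, &rdev->flags))
			in_sync++;

	mddev_unlock(mddev);
	return in_sync;
}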
extern void md_allow_write(struct mddev *mddev); extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); extern int md_check_no_bitmap(struct mddev *mddev); extern int md_integrity_register(struct mddev *mddev); extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); extern int mddev_init(struct mddev *mddev); extern void mddev_destroy(struct mddev *mddev); void md_init_stacking_limits(struct queue_limits *lim); struct mddev *md_alloc(dev_t dev, char *name); void mddev_put(struct mddev *mddev); extern int md_run(struct mddev *mddev); extern int md_start(struct mddev *mddev); extern void md_stop(struct mddev *mddev); extern void md_stop_writes(struct mddev *mddev); extern int md_rdev_init(struct md_rdev *rdev); extern void md_rdev_clear(struct md_rdev *rdev); extern bool md_handle_request(struct mddev *mddev, struct bio *bio); extern int mddev_suspend(struct mddev *mddev, bool interruptible); extern void mddev_resume(struct mddev *mddev); extern void md_idle_sync_thread(struct mddev *mddev); extern void md_frozen_sync_thread(struct mddev *mddev); extern void md_unfrozen_sync_thread(struct mddev *mddev); extern void md_update_sb(struct mddev *mddev, int force); extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev); extern void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev); struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev); static inline bool is_rdev_broken(struct md_rdev *rdev) { return !disk_live(rdev->bdev->bd_disk); } static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) { int faulty = test_bit(Faulty, &rdev->flags); if (atomic_dec_and_test(&rdev->nr_pending) && faulty) { set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); } } static inline int mddev_is_clustered(struct mddev *mddev) { return mddev->cluster_info && mddev->bitmap_info.nodes > 1; } /* clear unsupported mddev_flags */ static inline void mddev_clear_unsupported_flags(struct mddev *mddev, unsigned long unsupported_flags) { mddev->flags &= ~unsupported_flags; } static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio) { if (bio_op(bio) == REQ_OP_WRITE_ZEROES && !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors) mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0; } static inline int mddev_suspend_and_lock(struct mddev *mddev) { int ret; ret = mddev_suspend(mddev, true); if (ret) return ret; ret = mddev_lock(mddev); if (ret) mddev_resume(mddev); return ret; } static inline void mddev_suspend_and_lock_nointr(struct mddev *mddev) { mddev_suspend(mddev, false); mutex_lock(&mddev->reconfig_mutex); } static inline void mddev_unlock_and_resume(struct mddev *mddev) { mddev_unlock(mddev); mddev_resume(mddev); } struct mdu_array_info_s; struct mdu_disk_info_s; extern int mdp_major; void md_autostart_arrays(int part); int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info); int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info); int do_md_run(struct mddev *mddev); #define MDDEV_STACK_INTEGRITY (1u << 0) int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, unsigned int flags); int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev); void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes); extern const struct 
block_device_operations md_fops; /* * MD devices can be used underneath DM, in which case ->gendisk is NULL. */ static inline bool mddev_is_dm(struct mddev *mddev) { return !mddev->gendisk; } static inline bool raid_is_456(struct mddev *mddev) { return mddev->level == ID_RAID4 || mddev->level == ID_RAID5 || mddev->level == ID_RAID6; } static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio, sector_t sector) { if (!mddev_is_dm(mddev)) trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector); } static inline bool rdev_blocked(struct md_rdev *rdev) { /* * Blocked will be set by the error handler and cleared by the daemon after * updating the superblock; meanwhile write IO should be blocked to prevent * reading old data after power failure. */ if (test_bit(Blocked, &rdev->flags)) return true; /* * A Faulty device should not be accessed anymore; there is no need to * wait for bad blocks to be acknowledged. */ if (test_bit(Faulty, &rdev->flags)) return false; /* rdev is blocked by badblocks. */ if (test_bit(BlockedBadBlocks, &rdev->flags)) return true; return false; } #define mddev_add_trace_msg(mddev, fmt, args...) \ do { \ if (!mddev_is_dm(mddev)) \ blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \ } while (0) #endif /* _MD_MD_H */
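rdev_blocked() above, together with md_wait_for_blocked_rdev(), rdev_has_badblock() and the nr_pending/rdev_dec_pending() pair, is the pattern a personality follows before issuing a write to a member device. The sketch below is illustrative only; the function name and control flow are invented, and real personalities interleave these steps with their own retry logic.

/* Illustrative write-side check, assuming the helpers declared above. */
static bool foo_prepare_rdev_write(struct mddev *mddev, struct md_rdev *rdev,
				   sector_t sector, int sectors)
{
	/* If the error handler blocked the rdev, wait until the metadata
	 * update completes before issuing new writes. */
	if (rdev_blocked(rdev))
		md_wait_for_blocked_rdev(rdev, mddev);

	/* Skip regions already recorded as bad on this device. */
	if (rdev_has_badblock(rdev, sector, sectors))
		return false;

	/* Hold a reference across the I/O; dropped later via rdev_dec_pending(). */
	atomic_inc(&rdev->nr_pending);
	return true;
}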
#undef TRACE_SYSTEM
#define TRACE_SYSTEM netlink

#if !defined(_TRACE_NETLINK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NETLINK_H

#include <linux/tracepoint.h>

TRACE_EVENT(netlink_extack,

	TP_PROTO(const char *msg),

	TP_ARGS(msg),

	TP_STRUCT__entry(
		__string(msg, msg)
	),

	TP_fast_assign(
		__assign_str(msg);
	),

	TP_printk("msg=%s", __get_str(msg))
);

#endif /* _TRACE_NETLINK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
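The TRACE_EVENT() above generates a trace_netlink_extack() helper that callers invoke with the extended-ack message string. The snippet below is a sketch of such a call site, assuming the header lives at <trace/events/netlink.h>; foo_report_extack() is an invented wrapper, not an existing kernel function.

/* Exactly one compilation unit must define CREATE_TRACE_POINTS before
 * including the trace header so the tracepoint is instantiated. */
#include <linux/netlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/netlink.h>

static void foo_report_extack(const struct netlink_ext_ack *extack)
{
	/* NL_SET_ERR_MSG() fills extack->_msg; forward it to the tracepoint. */
	if (extack && extack->_msg)
		trace_netlink_extack(extack->_msg);
}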
896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 
1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 
// SPDX-License-Identifier: GPL-2.0-only
/* DVB USB compliant linux driver for Conexant USB reference design.
 *
 * The Conexant reference design I saw on their website was only for analogue
 * capturing (using the cx25842). The box I took to write this driver (reverse
 * engineered) is the one labeled Medion MD95700. In addition to the cx25842
 * for analogue capturing it also has a cx22702 DVB-T demodulator on the main
 * board. Besides, it has an ATI remote (X10) and a USB 2.0 hub onboard.
 *
 * Maybe it is a little bit premature to call this driver cxusb, but I assume
 * the USB protocol is identical or at least inherited from the reference
 * design, so it can be reused for the "analogue-only" device (if it will
 * appear at all).
 *
 * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
 * Copyright (C) 2006 Michael Krufky (mkrufky@linuxtv.org)
 * Copyright (C) 2006, 2007 Chris Pascoe (c.pascoe@itee.uq.edu.au)
 * Copyright (C) 2011, 2017 Maciej S. Szmigiero (mail@maciej.szmigiero.name)
 *
 * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
 */
#include <media/tuner.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "cxusb.h"

#include "cx22702.h"
#include "lgdt330x.h"
#include "mt352.h"
#include "mt352_priv.h"
#include "zl10353.h"
#include "xc2028.h"
#include "tuner-simple.h"
#include "mxl5005s.h"
#include "max2165.h"
#include "dib7000p.h"
#include "dib0070.h"
#include "lgs8gxx.h"
#include "atbm8830.h"
#include "si2168.h"
#include "si2157.h"

/* debug */
int dvb_usb_cxusb_debug;
module_param_named(debug, dvb_usb_cxusb_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (see cxusb.h)."
DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); enum cxusb_table_index { MEDION_MD95700, DVICO_BLUEBIRD_LG064F_COLD, DVICO_BLUEBIRD_LG064F_WARM, DVICO_BLUEBIRD_DUAL_1_COLD, DVICO_BLUEBIRD_DUAL_1_WARM, DVICO_BLUEBIRD_LGZ201_COLD, DVICO_BLUEBIRD_LGZ201_WARM, DVICO_BLUEBIRD_TH7579_COLD, DVICO_BLUEBIRD_TH7579_WARM, DIGITALNOW_BLUEBIRD_DUAL_1_COLD, DIGITALNOW_BLUEBIRD_DUAL_1_WARM, DVICO_BLUEBIRD_DUAL_2_COLD, DVICO_BLUEBIRD_DUAL_2_WARM, DVICO_BLUEBIRD_DUAL_4, DVICO_BLUEBIRD_DVB_T_NANO_2, DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM, AVERMEDIA_VOLAR_A868R, DVICO_BLUEBIRD_DUAL_4_REV_2, CONEXANT_D680_DMB, MYGICA_D689, NR__cxusb_table_index }; static const struct usb_device_id cxusb_table[]; int cxusb_ctrl_msg(struct dvb_usb_device *d, u8 cmd, const u8 *wbuf, int wlen, u8 *rbuf, int rlen) { struct cxusb_state *st = d->priv; int ret; if (1 + wlen > MAX_XFER_SIZE) { warn("i2c wr: len=%d is too big!\n", wlen); return -EOPNOTSUPP; } if (rlen > MAX_XFER_SIZE) { warn("i2c rd: len=%d is too big!\n", rlen); return -EOPNOTSUPP; } mutex_lock(&d->data_mutex); st->data[0] = cmd; memcpy(&st->data[1], wbuf, wlen); ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, st->data, rlen, 0); if (!ret && rbuf && rlen) memcpy(rbuf, st->data, rlen); mutex_unlock(&d->data_mutex); return ret; } /* GPIO */ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff) { struct cxusb_state *st = d->priv; u8 o[2], i; if (st->gpio_write_state[GPIO_TUNER] == onoff && !st->gpio_write_refresh[GPIO_TUNER]) return; o[0] = GPIO_TUNER; o[1] = onoff; if (!cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1) && i != 0x01) dev_info(&d->udev->dev, "gpio_write failed.\n"); st->gpio_write_state[GPIO_TUNER] = onoff; st->gpio_write_refresh[GPIO_TUNER] = false; } static int cxusb_bluebird_gpio_rw(struct dvb_usb_device *d, u8 changemask, u8 newval) { u8 o[2], gpio_state; int rc; o[0] = 0xff & ~changemask; /* mask of bits to keep */ o[1] = newval & changemask; /* new values for bits */ rc = cxusb_ctrl_msg(d, CMD_BLUEBIRD_GPIO_RW, o, 2, &gpio_state, 1); if (rc < 0 || (gpio_state & changemask) != (newval & changemask)) dev_info(&d->udev->dev, "bluebird_gpio_write failed.\n"); return rc < 0 ? rc : gpio_state; } static void cxusb_bluebird_gpio_pulse(struct dvb_usb_device *d, u8 pin, int low) { cxusb_bluebird_gpio_rw(d, pin, low ? 0 : pin); msleep(5); cxusb_bluebird_gpio_rw(d, pin, low ? pin : 0); } static void cxusb_nano2_led(struct dvb_usb_device *d, int onoff) { cxusb_bluebird_gpio_rw(d, 0x40, onoff ? 
0 : 0x40); } static int cxusb_d680_dmb_gpio_tuner(struct dvb_usb_device *d, u8 addr, int onoff) { u8 o[2] = {addr, onoff}; u8 i; int rc; rc = cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1); if (rc < 0) return rc; if (i == 0x01) return 0; dev_info(&d->udev->dev, "gpio_write failed.\n"); return -EIO; } /* I2C */ static int cxusb_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int ret; int i; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { if (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_MEDION) switch (msg[i].addr) { case 0x63: cxusb_gpio_tuner(d, 0); break; default: cxusb_gpio_tuner(d, 1); break; } if (msg[i].flags & I2C_M_RD) { /* read only */ u8 obuf[3], ibuf[MAX_XFER_SIZE]; if (1 + msg[i].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[i].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = 0; obuf[1] = msg[i].len; obuf[2] = msg[i].addr; if (cxusb_ctrl_msg(d, CMD_I2C_READ, obuf, 3, ibuf, 1 + msg[i].len) < 0) { warn("i2c read failed"); break; } memcpy(msg[i].buf, &ibuf[1], msg[i].len); } else if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD) && msg[i].addr == msg[i + 1].addr) { /* write to then read from same address */ u8 obuf[MAX_XFER_SIZE], ibuf[MAX_XFER_SIZE]; if (3 + msg[i].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[i].len); ret = -EOPNOTSUPP; goto unlock; } if (1 + msg[i + 1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[i + 1].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[i].len; obuf[1] = msg[i + 1].len; obuf[2] = msg[i].addr; memcpy(&obuf[3], msg[i].buf, msg[i].len); if (cxusb_ctrl_msg(d, CMD_I2C_READ, obuf, 3 + msg[i].len, ibuf, 1 + msg[i + 1].len) < 0) break; if (ibuf[0] != 0x08) dev_info(&d->udev->dev, "i2c read may have failed\n"); memcpy(msg[i + 1].buf, &ibuf[1], msg[i + 1].len); i++; } else { /* write only */ u8 obuf[MAX_XFER_SIZE], ibuf; if (2 + msg[i].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[i].len); ret = -EOPNOTSUPP; goto unlock; } obuf[0] = msg[i].addr; obuf[1] = msg[i].len; memcpy(&obuf[2], msg[i].buf, msg[i].len); if (cxusb_ctrl_msg(d, CMD_I2C_WRITE, obuf, 2 + msg[i].len, &ibuf, 1) < 0) break; if (ibuf != 0x08) dev_info(&d->udev->dev, "i2c write may have failed\n"); } } if (i == num) ret = num; else ret = -EREMOTEIO; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static u32 cxusb_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm cxusb_i2c_algo = { .master_xfer = cxusb_i2c_xfer, .functionality = cxusb_i2c_func, }; static int _cxusb_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 b = 0; dev_info(&d->udev->dev, "setting power %s\n", onoff ? 
"ON" : "OFF"); if (onoff) return cxusb_ctrl_msg(d, CMD_POWER_ON, &b, 1, NULL, 0); else return cxusb_ctrl_msg(d, CMD_POWER_OFF, &b, 1, NULL, 0); } static int cxusb_power_ctrl(struct dvb_usb_device *d, int onoff) { bool is_medion = d->props.devices[0].warm_ids[0] == &cxusb_table[MEDION_MD95700]; int ret; if (is_medion && !onoff) { struct cxusb_medion_dev *cxdev = d->priv; mutex_lock(&cxdev->open_lock); if (cxdev->open_type == CXUSB_OPEN_ANALOG) { dev_info(&d->udev->dev, "preventing DVB core from setting power OFF while we are in analog mode\n"); ret = -EBUSY; goto ret_unlock; } } ret = _cxusb_power_ctrl(d, onoff); ret_unlock: if (is_medion && !onoff) { struct cxusb_medion_dev *cxdev = d->priv; mutex_unlock(&cxdev->open_lock); } return ret; } static int cxusb_aver_power_ctrl(struct dvb_usb_device *d, int onoff) { int ret; if (!onoff) return cxusb_ctrl_msg(d, CMD_POWER_OFF, NULL, 0, NULL, 0); if (d->state == DVB_USB_STATE_INIT && usb_set_interface(d->udev, 0, 0) < 0) err("set interface failed"); do { /* Nothing */ } while (!(ret = cxusb_ctrl_msg(d, CMD_POWER_ON, NULL, 0, NULL, 0)) && !(ret = cxusb_ctrl_msg(d, 0x15, NULL, 0, NULL, 0)) && !(ret = cxusb_ctrl_msg(d, 0x17, NULL, 0, NULL, 0)) && 0); if (!ret) { /* * FIXME: We don't know why, but we need to configure the * lgdt3303 with the register settings below on resume */ int i; u8 buf; static const u8 bufs[] = { 0x0e, 0x2, 0x00, 0x7f, 0x0e, 0x2, 0x02, 0xfe, 0x0e, 0x2, 0x02, 0x01, 0x0e, 0x2, 0x00, 0x03, 0x0e, 0x2, 0x0d, 0x40, 0x0e, 0x2, 0x0e, 0x87, 0x0e, 0x2, 0x0f, 0x8e, 0x0e, 0x2, 0x10, 0x01, 0x0e, 0x2, 0x14, 0xd7, 0x0e, 0x2, 0x47, 0x88, }; msleep(20); for (i = 0; i < ARRAY_SIZE(bufs); i += 4 / sizeof(u8)) { ret = cxusb_ctrl_msg(d, CMD_I2C_WRITE, bufs + i, 4, &buf, 1); if (ret) break; if (buf != 0x8) return -EREMOTEIO; } } return ret; } static int cxusb_bluebird_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 b = 0; if (onoff) return cxusb_ctrl_msg(d, CMD_POWER_ON, &b, 1, NULL, 0); else return 0; } static int cxusb_nano2_power_ctrl(struct dvb_usb_device *d, int onoff) { int rc = 0; rc = cxusb_power_ctrl(d, onoff); if (!onoff) cxusb_nano2_led(d, 0); return rc; } static int cxusb_d680_dmb_power_ctrl(struct dvb_usb_device *d, int onoff) { int ret; u8 b; ret = cxusb_power_ctrl(d, onoff); if (!onoff) return ret; msleep(128); cxusb_ctrl_msg(d, CMD_DIGITAL, NULL, 0, &b, 1); msleep(100); return ret; } static int cxusb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct dvb_usb_device *dvbdev = adap->dev; bool is_medion = dvbdev->props.devices[0].warm_ids[0] == &cxusb_table[MEDION_MD95700]; u8 buf[2] = { 0x03, 0x00 }; if (is_medion && onoff) { int ret; ret = cxusb_medion_get(dvbdev, CXUSB_OPEN_DIGITAL); if (ret != 0) return ret; } if (onoff) cxusb_ctrl_msg(dvbdev, CMD_STREAMING_ON, buf, 2, NULL, 0); else cxusb_ctrl_msg(dvbdev, CMD_STREAMING_OFF, NULL, 0, NULL, 0); if (is_medion && !onoff) cxusb_medion_put(dvbdev); return 0; } static int cxusb_aver_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { if (onoff) cxusb_ctrl_msg(adap->dev, CMD_AVER_STREAM_ON, NULL, 0, NULL, 0); else cxusb_ctrl_msg(adap->dev, CMD_AVER_STREAM_OFF, NULL, 0, NULL, 0); return 0; } static void cxusb_d680_dmb_drain_message(struct dvb_usb_device *d) { int ep = d->props.generic_bulk_ctrl_endpoint; const int timeout = 100; const int junk_len = 32; u8 *junk; int rd_count; /* Discard remaining data in video pipe */ junk = kmalloc(junk_len, GFP_KERNEL); if (!junk) return; while (1) { if (usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev, ep), junk, junk_len, 
&rd_count, timeout) < 0) break; if (!rd_count) break; } kfree(junk); } static void cxusb_d680_dmb_drain_video(struct dvb_usb_device *d) { struct usb_data_stream_properties *p = &d->props.adapter[0].fe[0].stream; const int timeout = 100; const int junk_len = p->u.bulk.buffersize; u8 *junk; int rd_count; /* Discard remaining data in video pipe */ junk = kmalloc(junk_len, GFP_KERNEL); if (!junk) return; while (1) { if (usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev, p->endpoint), junk, junk_len, &rd_count, timeout) < 0) break; if (!rd_count) break; } kfree(junk); } static int cxusb_d680_dmb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { if (onoff) { u8 buf[2] = { 0x03, 0x00 }; cxusb_d680_dmb_drain_video(adap->dev); return cxusb_ctrl_msg(adap->dev, CMD_STREAMING_ON, buf, sizeof(buf), NULL, 0); } else { int ret = cxusb_ctrl_msg(adap->dev, CMD_STREAMING_OFF, NULL, 0, NULL, 0); return ret; } } static int cxusb_rc_query(struct dvb_usb_device *d) { u8 ircode[4]; if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0) return 0; if (ircode[2] || ircode[3]) rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(~ircode[2] & 0xff, ircode[3]), 0); return 0; } static int cxusb_bluebird2_rc_query(struct dvb_usb_device *d) { u8 ircode[4]; struct i2c_msg msg = { .addr = 0x6b, .flags = I2C_M_RD, .buf = ircode, .len = 4 }; if (cxusb_i2c_xfer(&d->i2c_adap, &msg, 1) != 1) return 0; if (ircode[1] || ircode[2]) rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(~ircode[1] & 0xff, ircode[2]), 0); return 0; } static int cxusb_d680_dmb_rc_query(struct dvb_usb_device *d) { u8 ircode[2]; if (cxusb_ctrl_msg(d, 0x10, NULL, 0, ircode, 2) < 0) return 0; if (ircode[0] || ircode[1]) rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, RC_SCANCODE_RC5(ircode[0], ircode[1]), 0); return 0; } static int cxusb_dee1601_demod_init(struct dvb_frontend *fe) { static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x28 }; static u8 reset[] = { RESET, 0x80 }; static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 }; static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0x20 }; static u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 }; static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 }; mt352_write(fe, clock_config, sizeof(clock_config)); udelay(200); mt352_write(fe, reset, sizeof(reset)); mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); mt352_write(fe, agc_cfg, sizeof(agc_cfg)); mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg)); mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); return 0; } static int cxusb_mt352_demod_init(struct dvb_frontend *fe) { /* used in both lgz201 and th7579 */ static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x29 }; static u8 reset[] = { RESET, 0x80 }; static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 }; static u8 agc_cfg[] = { AGC_TARGET, 0x24, 0x20 }; static u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 }; static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 }; mt352_write(fe, clock_config, sizeof(clock_config)); udelay(200); mt352_write(fe, reset, sizeof(reset)); mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg)); mt352_write(fe, agc_cfg, sizeof(agc_cfg)); mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg)); mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg)); return 0; } static struct cx22702_config cxusb_cx22702_config = { .demod_address = 0x63, .output_mode = CX22702_PARALLEL_OUTPUT, }; static struct lgdt330x_config cxusb_lgdt3303_config = { .demod_chip = LGDT3303, }; static struct lgdt330x_config cxusb_aver_lgdt3303_config = { .demod_chip = LGDT3303, .clock_polarity_flip = 2, }; static struct mt352_config cxusb_dee1601_config = { .demod_address = 
0x0f, .demod_init = cxusb_dee1601_demod_init, }; static struct zl10353_config cxusb_zl10353_dee1601_config = { .demod_address = 0x0f, .parallel_ts = 1, }; static struct mt352_config cxusb_mt352_config = { /* used in both lgz201 and th7579 */ .demod_address = 0x0f, .demod_init = cxusb_mt352_demod_init, }; static struct zl10353_config cxusb_zl10353_xc3028_config = { .demod_address = 0x0f, .if2 = 45600, .no_tuner = 1, .parallel_ts = 1, }; static struct zl10353_config cxusb_zl10353_xc3028_config_no_i2c_gate = { .demod_address = 0x0f, .if2 = 45600, .no_tuner = 1, .parallel_ts = 1, .disable_i2c_gate_ctrl = 1, }; static struct mt352_config cxusb_mt352_xc3028_config = { .demod_address = 0x0f, .if2 = 4560, .no_tuner = 1, .demod_init = cxusb_mt352_demod_init, }; /* FIXME: needs tweaking */ static struct mxl5005s_config aver_a868r_tuner = { .i2c_address = 0x63, .if_freq = 6000000UL, .xtal_freq = CRYSTAL_FREQ_16000000HZ, .agc_mode = MXL_SINGLE_AGC, .tracking_filter = MXL_TF_C, .rssi_enable = MXL_RSSI_ENABLE, .cap_select = MXL_CAP_SEL_ENABLE, .div_out = MXL_DIV_OUT_4, .clock_out = MXL_CLOCK_OUT_DISABLE, .output_load = MXL5005S_IF_OUTPUT_LOAD_200_OHM, .top = MXL5005S_TOP_25P2, .mod_mode = MXL_DIGITAL_MODE, .if_mode = MXL_ZERO_IF, .AgcMasterByte = 0x00, }; /* FIXME: needs tweaking */ static struct mxl5005s_config d680_dmb_tuner = { .i2c_address = 0x63, .if_freq = 36125000UL, .xtal_freq = CRYSTAL_FREQ_16000000HZ, .agc_mode = MXL_SINGLE_AGC, .tracking_filter = MXL_TF_C, .rssi_enable = MXL_RSSI_ENABLE, .cap_select = MXL_CAP_SEL_ENABLE, .div_out = MXL_DIV_OUT_4, .clock_out = MXL_CLOCK_OUT_DISABLE, .output_load = MXL5005S_IF_OUTPUT_LOAD_200_OHM, .top = MXL5005S_TOP_25P2, .mod_mode = MXL_DIGITAL_MODE, .if_mode = MXL_ZERO_IF, .AgcMasterByte = 0x00, }; static struct max2165_config mygica_d689_max2165_cfg = { .i2c_address = 0x60, .osc_clk = 20 }; /* Callbacks for DVB USB */ static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *dvbdev = adap->dev; bool is_medion = dvbdev->props.devices[0].warm_ids[0] == &cxusb_table[MEDION_MD95700]; dvb_attach(simple_tuner_attach, adap->fe_adap[0].fe, &dvbdev->i2c_adap, 0x61, TUNER_PHILIPS_FMD1216ME_MK3); if (is_medion && adap->fe_adap[0].fe) /* * make sure that DVB core won't put to sleep (reset, really) * tuner when we might be open in analog mode */ adap->fe_adap[0].fe->ops.tuner_ops.sleep = NULL; return 0; } static int cxusb_dee1601_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL, DVB_PLL_THOMSON_DTT7579); return 0; } static int cxusb_lgz201_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL, DVB_PLL_LG_Z201); return 0; } static int cxusb_dtt7579_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, NULL, DVB_PLL_THOMSON_DTT7579); return 0; } static int cxusb_lgh064f_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(simple_tuner_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, 0x61, TUNER_LG_TDVS_H06XF); return 0; } static int dvico_bluebird_xc2028_callback(void *ptr, int component, int command, int arg) { struct dvb_usb_adapter *adap = ptr; struct dvb_usb_device *d = adap->dev; switch (command) { case XC2028_TUNER_RESET: dev_info(&d->udev->dev, "XC2028_TUNER_RESET %d\n", arg); cxusb_bluebird_gpio_pulse(d, 0x01, 1); break; case XC2028_RESET_CLK: dev_info(&d->udev->dev, "XC2028_RESET_CLK %d\n", arg); break; case XC2028_I2C_FLUSH: break; default: dev_info(&d->udev->dev, "unknown 
command %d, arg %d\n", command, arg); return -EINVAL; } return 0; } static int cxusb_dvico_xc3028_tuner_attach(struct dvb_usb_adapter *adap) { struct dvb_frontend *fe; struct xc2028_config cfg = { .i2c_adap = &adap->dev->i2c_adap, .i2c_addr = 0x61, }; static struct xc2028_ctrl ctl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64, .demod = XC3028_FE_ZARLINK456, }; /* FIXME: generalize & move to common area */ adap->fe_adap[0].fe->callback = dvico_bluebird_xc2028_callback; fe = dvb_attach(xc2028_attach, adap->fe_adap[0].fe, &cfg); if (!fe || !fe->ops.tuner_ops.set_config) return -EIO; fe->ops.tuner_ops.set_config(fe, &ctl); return 0; } static int cxusb_mxl5003s_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &aver_a868r_tuner); return 0; } static int cxusb_d680_dmb_tuner_attach(struct dvb_usb_adapter *adap) { struct dvb_frontend *fe; fe = dvb_attach(mxl5005s_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &d680_dmb_tuner); return (!fe) ? -EIO : 0; } static int cxusb_mygica_d689_tuner_attach(struct dvb_usb_adapter *adap) { struct dvb_frontend *fe; fe = dvb_attach(max2165_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &mygica_d689_max2165_cfg); return (!fe) ? -EIO : 0; } static int cxusb_medion_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dvb_usb_device *dvbdev = adap->dev; if (acquire) return cxusb_medion_get(dvbdev, CXUSB_OPEN_DIGITAL); cxusb_medion_put(dvbdev); return 0; } static int cxusb_medion_set_mode(struct dvb_usb_device *dvbdev, bool digital) { struct cxusb_state *st = dvbdev->priv; int ret; u8 b; unsigned int i; /* * switching mode while doing an I2C transaction often causes * the device to crash */ mutex_lock(&dvbdev->i2c_mutex); if (digital) { ret = usb_set_interface(dvbdev->udev, 0, 6); if (ret != 0) { dev_err(&dvbdev->udev->dev, "digital interface selection failed (%d)\n", ret); goto ret_unlock; } } else { ret = usb_set_interface(dvbdev->udev, 0, 1); if (ret != 0) { dev_err(&dvbdev->udev->dev, "analog interface selection failed (%d)\n", ret); goto ret_unlock; } } /* pipes need to be cleared after setting interface */ ret = usb_clear_halt(dvbdev->udev, usb_rcvbulkpipe(dvbdev->udev, 1)); if (ret != 0) dev_warn(&dvbdev->udev->dev, "clear halt on IN pipe failed (%d)\n", ret); ret = usb_clear_halt(dvbdev->udev, usb_sndbulkpipe(dvbdev->udev, 1)); if (ret != 0) dev_warn(&dvbdev->udev->dev, "clear halt on OUT pipe failed (%d)\n", ret); ret = cxusb_ctrl_msg(dvbdev, digital ? 
CMD_DIGITAL : CMD_ANALOG, NULL, 0, &b, 1); if (ret != 0) { dev_err(&dvbdev->udev->dev, "mode switch failed (%d)\n", ret); goto ret_unlock; } /* mode switch seems to reset GPIO states */ for (i = 0; i < ARRAY_SIZE(st->gpio_write_refresh); i++) st->gpio_write_refresh[i] = true; ret_unlock: mutex_unlock(&dvbdev->i2c_mutex); return ret; } static int cxusb_cx22702_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *dvbdev = adap->dev; bool is_medion = dvbdev->props.devices[0].warm_ids[0] == &cxusb_table[MEDION_MD95700]; if (is_medion) { int ret; ret = cxusb_medion_set_mode(dvbdev, true); if (ret) return ret; } adap->fe_adap[0].fe = dvb_attach(cx22702_attach, &cxusb_cx22702_config, &dvbdev->i2c_adap); if (!adap->fe_adap[0].fe) return -EIO; if (is_medion) adap->fe_adap[0].fe->ops.ts_bus_ctrl = cxusb_medion_fe_ts_bus_ctrl; return 0; } static int cxusb_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap) { if (usb_set_interface(adap->dev->udev, 0, 7) < 0) err("set interface failed"); cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); adap->fe_adap[0].fe = dvb_attach(lgdt330x_attach, &cxusb_lgdt3303_config, 0x0e, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) return 0; return -EIO; } static int cxusb_aver_lgdt3303_frontend_attach(struct dvb_usb_adapter *adap) { adap->fe_adap[0].fe = dvb_attach(lgdt330x_attach, &cxusb_aver_lgdt3303_config, 0x0e, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) return 0; return -EIO; } static int cxusb_mt352_frontend_attach(struct dvb_usb_adapter *adap) { /* used in both lgz201 and th7579 */ if (usb_set_interface(adap->dev->udev, 0, 0) < 0) err("set interface failed"); cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); adap->fe_adap[0].fe = dvb_attach(mt352_attach, &cxusb_mt352_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) return 0; return -EIO; } static int cxusb_dee1601_frontend_attach(struct dvb_usb_adapter *adap) { if (usb_set_interface(adap->dev->udev, 0, 0) < 0) err("set interface failed"); cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); adap->fe_adap[0].fe = dvb_attach(mt352_attach, &cxusb_dee1601_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) return 0; adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &cxusb_zl10353_dee1601_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) return 0; return -EIO; } static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap) { u8 ircode[4]; int i; struct i2c_msg msg = { .addr = 0x6b, .flags = I2C_M_RD, .buf = ircode, .len = 4 }; if (usb_set_interface(adap->dev->udev, 0, 1) < 0) err("set interface failed"); cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); /* reset the tuner and demodulator */ cxusb_bluebird_gpio_rw(adap->dev, 0x04, 0); cxusb_bluebird_gpio_pulse(adap->dev, 0x01, 1); cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1); adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &cxusb_zl10353_xc3028_config_no_i2c_gate, &adap->dev->i2c_adap); if (!adap->fe_adap[0].fe) return -EIO; /* try to determine if there is no IR decoder on the I2C bus */ for (i = 0; adap->dev->props.rc.core.rc_codes && i < 5; i++) { msleep(20); if (cxusb_i2c_xfer(&adap->dev->i2c_adap, &msg, 1) != 1) goto no_IR; if (ircode[0] == 0 && ircode[1] == 0) continue; if (ircode[2] + ircode[3] != 0xff) { no_IR: adap->dev->props.rc.core.rc_codes = NULL; info("No IR receiver detected on this device."); break; } } return 0; } static struct dibx000_agc_config dib7070_agc_config = { .band_caps = BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND, /* * P_agc_use_sd_mod1=0, 
P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 600, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 3530, .wbd_sel = 1, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 40, .agc1_pt3 = 183, .agc1_slope1 = 206, .agc1_slope2 = 255, .agc2_pt1 = 72, .agc2_pt2 = 152, .agc2_slope1 = 88, .agc2_slope2 = 90, .alpha_mant = 17, .alpha_exp = 27, .beta_mant = 23, .beta_exp = 51, .perform_agc_softsplit = 0, }; static struct dibx000_bandwidth_config dib7070_bw_config_12_mhz = { .internal = 60000, .sampling = 15000, .pll_prediv = 1, .pll_ratio = 20, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, /* refsel, sel, freq_15k */ .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = (0 << 25) | 0, .timf = 20452225, .xtal_hz = 12000000, }; static struct dib7000p_config cxusb_dualdig4_rev2_config = { .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK, .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = 0xfcef, .gpio_val = 0x0110, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, }; struct dib0700_adapter_state { int (*set_param_save)(struct dvb_frontend *fe); struct dib7000p_ops dib7000p_ops; }; static int cxusb_dualdig4_rev2_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (usb_set_interface(adap->dev->udev, 0, 1) < 0) err("set interface failed"); cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1); if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, &cxusb_dualdig4_rev2_config) < 0) { pr_warn("Unable to enumerate dib7000p\n"); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &cxusb_dualdig4_rev2_config); if (!adap->fe_adap[0].fe) return -EIO; return 0; } static int dib7070_tuner_reset(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; return state->dib7000p_ops.set_gpio(fe, 8, 0, !onoff); } static int dib7070_tuner_sleep(struct dvb_frontend *fe, int onoff) { return 0; } static struct dib0070_config dib7070p_dib0070_config = { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib7070_tuner_reset, .sleep = dib7070_tuner_sleep, .clock_khz = 12000, }; static int dib7070_set_param_override(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u16 offset; u8 band = BAND_OF_FREQUENCY(p->frequency / 1000); switch (band) { case BAND_VHF: offset = 950; break; default: case BAND_UHF: offset = 550; break; } state->dib7000p_ops.set_wbd_ref(fe, offset + dib0070_wbd_offset(fe)); return state->set_param_save(fe); } static int cxusb_dualdig4_rev2_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c; /* * No need to call dvb7000p_attach here, as it was 
called * already, as frontend_attach method is called first, and * tuner_attach is only called on success. */ tun_i2c = st->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib7070p_dib0070_config) == NULL) return -ENODEV; st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7070_set_param_override; return 0; } static int cxusb_nano2_frontend_attach(struct dvb_usb_adapter *adap) { if (usb_set_interface(adap->dev->udev, 0, 1) < 0) err("set interface failed"); cxusb_ctrl_msg(adap->dev, CMD_DIGITAL, NULL, 0, NULL, 0); /* reset the tuner and demodulator */ cxusb_bluebird_gpio_rw(adap->dev, 0x04, 0); cxusb_bluebird_gpio_pulse(adap->dev, 0x01, 1); cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1); adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &cxusb_zl10353_xc3028_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) return 0; adap->fe_adap[0].fe = dvb_attach(mt352_attach, &cxusb_mt352_xc3028_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) return 0; return -EIO; } static struct lgs8gxx_config d680_lgs8gl5_cfg = { .prod = LGS8GXX_PROD_LGS8GL5, .demod_address = 0x19, .serial_ts = 0, .ts_clk_pol = 0, .ts_clk_gated = 1, .if_clk_freq = 30400, /* 30.4 MHz */ .if_freq = 5725, /* 5.725 MHz */ .if_neg_center = 0, .ext_adc = 0, .adc_signed = 0, .if_neg_edge = 0, }; static int cxusb_d680_dmb_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; int n; /* Select required USB configuration */ if (usb_set_interface(d->udev, 0, 0) < 0) err("set interface failed"); /* Unblock all USB pipes */ usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint)); /* Drain USB pipes to avoid hang after reboot */ for (n = 0; n < 5; n++) { cxusb_d680_dmb_drain_message(d); cxusb_d680_dmb_drain_video(d); msleep(200); } /* Reset the tuner */ if (cxusb_d680_dmb_gpio_tuner(d, 0x07, 0) < 0) { err("clear tuner gpio failed"); return -EIO; } msleep(100); if (cxusb_d680_dmb_gpio_tuner(d, 0x07, 1) < 0) { err("set tuner gpio failed"); return -EIO; } msleep(100); /* Attach frontend */ adap->fe_adap[0].fe = dvb_attach(lgs8gxx_attach, &d680_lgs8gl5_cfg, &d->i2c_adap); if (!adap->fe_adap[0].fe) return -EIO; return 0; } static struct atbm8830_config mygica_d689_atbm8830_cfg = { .prod = ATBM8830_PROD_8830, .demod_address = 0x40, .serial_ts = 0, .ts_sampling_edge = 1, .ts_clk_gated = 0, .osc_clk_freq = 30400, /* in kHz */ .if_freq = 0, /* zero IF */ .zif_swap_iq = 1, .agc_min = 0x2E, .agc_max = 0x90, .agc_hold_loop = 0, }; static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap->dev; /* Select required USB configuration */ if (usb_set_interface(d->udev, 0, 0) < 0) err("set interface failed"); /* Unblock all USB pipes */ usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); usb_clear_halt(d->udev, usb_rcvbulkpipe(d->udev, d->props.adapter[0].fe[0].stream.endpoint)); /* Reset the tuner */ if (cxusb_d680_dmb_gpio_tuner(d, 0x07, 0) < 0) { err("clear tuner gpio failed"); return -EIO; } msleep(100); if (cxusb_d680_dmb_gpio_tuner(d, 0x07, 1) < 0) { err("set tuner gpio 
failed"); return -EIO; } msleep(100); /* Attach frontend */ adap->fe_adap[0].fe = dvb_attach(atbm8830_attach, &mygica_d689_atbm8830_cfg, &d->i2c_adap); if (!adap->fe_adap[0].fe) return -EIO; return 0; } /* * DViCO has shipped two devices with the same USB ID, but only one of them * needs a firmware download. Check the device class details to see if they * have non-default values to decide whether the device is actually cold or * not, and forget a match if it turns out we selected the wrong device. */ static int bluebird_fx2_identify_state(struct usb_device *udev, const struct dvb_usb_device_properties *props, const struct dvb_usb_device_description **desc, int *cold) { int wascold = *cold; *cold = udev->descriptor.bDeviceClass == 0xff && udev->descriptor.bDeviceSubClass == 0xff && udev->descriptor.bDeviceProtocol == 0xff; if (*cold && !wascold) *desc = NULL; return 0; } /* * DViCO bluebird firmware needs the "warm" product ID to be patched into the * firmware file before download. */ static const int dvico_firmware_id_offsets[] = { 6638, 3204 }; static int bluebird_patch_dvico_firmware_download(struct usb_device *udev, const struct firmware *fw) { int pos; for (pos = 0; pos < ARRAY_SIZE(dvico_firmware_id_offsets); pos++) { int idoff = dvico_firmware_id_offsets[pos]; if (fw->size < idoff + 4) continue; if (fw->data[idoff] == (USB_VID_DVICO & 0xff) && fw->data[idoff + 1] == USB_VID_DVICO >> 8) { struct firmware new_fw; u8 *new_fw_data = vmalloc(fw->size); int ret; if (!new_fw_data) return -ENOMEM; memcpy(new_fw_data, fw->data, fw->size); new_fw.size = fw->size; new_fw.data = new_fw_data; new_fw_data[idoff + 2] = le16_to_cpu(udev->descriptor.idProduct) + 1; new_fw_data[idoff + 3] = le16_to_cpu(udev->descriptor.idProduct) >> 8; ret = usb_cypress_load_firmware(udev, &new_fw, CYPRESS_FX2); vfree(new_fw_data); return ret; } } return -EINVAL; } int cxusb_medion_get(struct dvb_usb_device *dvbdev, enum cxusb_open_type open_type) { struct cxusb_medion_dev *cxdev = dvbdev->priv; int ret = 0; mutex_lock(&cxdev->open_lock); if (WARN_ON((cxdev->open_type == CXUSB_OPEN_INIT || cxdev->open_type == CXUSB_OPEN_NONE) && cxdev->open_ctr != 0)) { ret = -EINVAL; goto ret_unlock; } if (cxdev->open_type == CXUSB_OPEN_INIT) { ret = -EAGAIN; goto ret_unlock; } if (cxdev->open_ctr == 0) { if (cxdev->open_type != open_type) { dev_info(&dvbdev->udev->dev, "will acquire and switch to %s\n", open_type == CXUSB_OPEN_ANALOG ? "analog" : "digital"); if (open_type == CXUSB_OPEN_ANALOG) { ret = _cxusb_power_ctrl(dvbdev, 1); if (ret != 0) dev_warn(&dvbdev->udev->dev, "powerup for analog switch failed (%d)\n", ret); ret = cxusb_medion_set_mode(dvbdev, false); if (ret != 0) goto ret_unlock; ret = cxusb_medion_analog_init(dvbdev); if (ret != 0) goto ret_unlock; } else { /* digital */ ret = _cxusb_power_ctrl(dvbdev, 1); if (ret != 0) dev_warn(&dvbdev->udev->dev, "powerup for digital switch failed (%d)\n", ret); ret = cxusb_medion_set_mode(dvbdev, true); if (ret != 0) goto ret_unlock; } cxdev->open_type = open_type; } else { dev_info(&dvbdev->udev->dev, "reacquired idle %s\n", open_type == CXUSB_OPEN_ANALOG ? "analog" : "digital"); } cxdev->open_ctr = 1; } else if (cxdev->open_type == open_type) { cxdev->open_ctr++; dev_info(&dvbdev->udev->dev, "acquired %s\n", open_type == CXUSB_OPEN_ANALOG ? 
"analog" : "digital"); } else { ret = -EBUSY; } ret_unlock: mutex_unlock(&cxdev->open_lock); return ret; } void cxusb_medion_put(struct dvb_usb_device *dvbdev) { struct cxusb_medion_dev *cxdev = dvbdev->priv; mutex_lock(&cxdev->open_lock); if (cxdev->open_type == CXUSB_OPEN_INIT) { WARN_ON(cxdev->open_ctr != 0); cxdev->open_type = CXUSB_OPEN_NONE; goto unlock; } if (!WARN_ON(cxdev->open_ctr < 1)) { cxdev->open_ctr--; dev_info(&dvbdev->udev->dev, "release %s\n", cxdev->open_type == CXUSB_OPEN_ANALOG ? "analog" : "digital"); } unlock: mutex_unlock(&cxdev->open_lock); } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties cxusb_medion_properties; static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties; static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties; static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties; static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties; static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties; static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties; static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties; static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_properties; static struct dvb_usb_device_properties cxusb_aver_a868r_properties; static struct dvb_usb_device_properties cxusb_d680_dmb_properties; static struct dvb_usb_device_properties cxusb_mygica_d689_properties; static int cxusb_medion_priv_init(struct dvb_usb_device *dvbdev) { struct cxusb_medion_dev *cxdev = dvbdev->priv; cxdev->dvbdev = dvbdev; cxdev->open_type = CXUSB_OPEN_INIT; mutex_init(&cxdev->open_lock); return 0; } static void cxusb_medion_priv_destroy(struct dvb_usb_device *dvbdev) { struct cxusb_medion_dev *cxdev = dvbdev->priv; mutex_destroy(&cxdev->open_lock); } static bool cxusb_medion_check_altsetting(struct usb_host_interface *as) { unsigned int ctr; for (ctr = 0; ctr < as->desc.bNumEndpoints; ctr++) { if ((as->endpoint[ctr].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != 2) continue; if (as->endpoint[ctr].desc.bEndpointAddress & USB_DIR_IN && ((as->endpoint[ctr].desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_ISOC)) return true; break; } return false; } static bool cxusb_medion_check_intf(struct usb_interface *intf) { unsigned int ctr; if (intf->num_altsetting < 2) { dev_err(intf->usb_dev, "no alternate interface"); return false; } for (ctr = 0; ctr < intf->num_altsetting; ctr++) { if (intf->altsetting[ctr].desc.bAlternateSetting != 1) continue; if (cxusb_medion_check_altsetting(&intf->altsetting[ctr])) return true; break; } dev_err(intf->usb_dev, "no iso interface"); return false; } static int cxusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *dvbdev; int ret; /* Medion 95700 */ if (!dvb_usb_device_init(intf, &cxusb_medion_properties, THIS_MODULE, &dvbdev, adapter_nr)) { if (!cxusb_medion_check_intf(intf)) { ret = -ENODEV; goto ret_uninit; } _cxusb_power_ctrl(dvbdev, 1); ret = cxusb_medion_set_mode(dvbdev, false); if (ret) goto ret_uninit; ret = cxusb_medion_register_analog(dvbdev); cxusb_medion_set_mode(dvbdev, true); _cxusb_power_ctrl(dvbdev, 0); if (ret != 0) goto ret_uninit; /* release device from INIT mode to normal operation */ cxusb_medion_put(dvbdev); return 0; } else if (!dvb_usb_device_init(intf, &cxusb_bluebird_lgh064f_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_bluebird_dee1601_properties, THIS_MODULE, 
NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_bluebird_lgz201_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_bluebird_dtt7579_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_bluebird_nano2_needsfirmware_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_aver_a868r_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_bluebird_dualdig4_rev2_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_d680_dmb_properties, THIS_MODULE, NULL, adapter_nr) || !dvb_usb_device_init(intf, &cxusb_mygica_d689_properties, THIS_MODULE, NULL, adapter_nr) || 0) return 0; return -EINVAL; ret_uninit: dvb_usb_device_exit(intf); return ret; } static void cxusb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); struct cxusb_state *st = d->priv; struct i2c_client *client; if (d->props.devices[0].warm_ids[0] == &cxusb_table[MEDION_MD95700]) cxusb_medion_unregister_analog(d); /* remove I2C client for tuner */ client = st->i2c_client_tuner; if (client) { module_put(client->dev.driver->owner); i2c_unregister_device(client); } /* remove I2C client for demodulator */ client = st->i2c_client_demod; if (client) { module_put(client->dev.driver->owner); i2c_unregister_device(client); } dvb_usb_device_exit(intf); } static const struct usb_device_id cxusb_table[] = { DVB_USB_DEV(MEDION, MEDION_MD95700), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_LG064F_COLD), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_LG064F_WARM), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_DUAL_1_COLD), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_DUAL_1_WARM), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_LGZ201_COLD), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_LGZ201_WARM), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_TH7579_COLD), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_TH7579_WARM), DVB_USB_DEV(DVICO, DIGITALNOW_BLUEBIRD_DUAL_1_COLD), DVB_USB_DEV(DVICO, DIGITALNOW_BLUEBIRD_DUAL_1_WARM), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_DUAL_2_COLD), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_DUAL_2_WARM), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_DUAL_4), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_DVB_T_NANO_2), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM), DVB_USB_DEV(AVERMEDIA, AVERMEDIA_VOLAR_A868R), DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_DUAL_4_REV_2), DVB_USB_DEV(CONEXANT, CONEXANT_D680_DMB), DVB_USB_DEV(CONEXANT, MYGICA_D689), { } }; MODULE_DEVICE_TABLE(usb, cxusb_table); static struct dvb_usb_device_properties cxusb_medion_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .size_of_priv = sizeof(struct cxusb_medion_dev), .priv_init = cxusb_medion_priv_init, .priv_destroy = cxusb_medion_priv_destroy, .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_cx22702_frontend_attach, .tuner_attach = cxusb_fmd1216me_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x02, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "Medion MD95700 (MDUSBTV-HYBRID)", { NULL }, { &cxusb_table[MEDION_MD95700], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_bluebird_lgh064f_properties = { 
.caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-bluebird-01.fw", .download_firmware = bluebird_patch_dvico_firmware_download, /* * use usb alt setting 0 for EP4 transfer (dvb-t), * use usb alt setting 7 for EP2 transfer (atsc) */ .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_lgdt3303_frontend_attach, .tuner_attach = cxusb_lgh064f_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x02, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_bluebird_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "DViCO FusionHDTV5 USB Gold", { &cxusb_table[DVICO_BLUEBIRD_LG064F_COLD], NULL }, { &cxusb_table[DVICO_BLUEBIRD_LG064F_WARM], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_bluebird_dee1601_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-bluebird-01.fw", .download_firmware = bluebird_patch_dvico_firmware_download, /* * use usb alt setting 0 for EP4 transfer (dvb-t), * use usb alt setting 7 for EP2 transfer (atsc) */ .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_dee1601_frontend_attach, .tuner_attach = cxusb_dee1601_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x04, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_bluebird_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_DVICO_MCE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 3, .devices = { { "DViCO FusionHDTV DVB-T Dual USB", { &cxusb_table[DVICO_BLUEBIRD_DUAL_1_COLD], NULL }, { &cxusb_table[DVICO_BLUEBIRD_DUAL_1_WARM], NULL }, }, { "DigitalNow DVB-T Dual USB", { &cxusb_table[DIGITALNOW_BLUEBIRD_DUAL_1_COLD], NULL }, { &cxusb_table[DIGITALNOW_BLUEBIRD_DUAL_1_WARM], NULL }, }, { "DViCO FusionHDTV DVB-T Dual Digital 2", { &cxusb_table[DVICO_BLUEBIRD_DUAL_2_COLD], NULL }, { &cxusb_table[DVICO_BLUEBIRD_DUAL_2_WARM], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_bluebird_lgz201_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-bluebird-01.fw", .download_firmware = bluebird_patch_dvico_firmware_download, /* * use usb alt setting 0 for EP4 transfer (dvb-t), * use usb alt setting 7 for EP2 transfer (atsc) */ .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_mt352_frontend_attach, .tuner_attach = cxusb_lgz201_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x04, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_bluebird_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, 
.allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "DViCO FusionHDTV DVB-T USB (LGZ201)", { &cxusb_table[DVICO_BLUEBIRD_LGZ201_COLD], NULL }, { &cxusb_table[DVICO_BLUEBIRD_LGZ201_WARM], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_bluebird_dtt7579_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-bluebird-01.fw", .download_firmware = bluebird_patch_dvico_firmware_download, /* * use usb alt setting 0 for EP4 transfer (dvb-t), * use usb alt setting 7 for EP2 transfer (atsc) */ .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_mt352_frontend_attach, .tuner_attach = cxusb_dtt7579_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x04, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_bluebird_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, .allowed_protos = RC_PROTO_BIT_NEC, }, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "DViCO FusionHDTV DVB-T USB (TH7579)", { &cxusb_table[DVICO_BLUEBIRD_TH7579_COLD], NULL }, { &cxusb_table[DVICO_BLUEBIRD_TH7579_WARM], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_dualdig4_frontend_attach, .tuner_attach = cxusb_dvico_xc3028_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x02, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_DVICO_MCE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_bluebird2_rc_query, .allowed_protos = RC_PROTO_BIT_NEC, }, .num_device_descs = 1, .devices = { { "DViCO FusionHDTV DVB-T Dual Digital 4", { NULL }, { &cxusb_table[DVICO_BLUEBIRD_DUAL_4], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_bluebird_nano2_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .identify_state = bluebird_fx2_identify_state, .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_nano2_frontend_attach, .tuner_attach = cxusb_dvico_xc3028_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x02, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_nano2_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_bluebird2_rc_query, .allowed_protos = RC_PROTO_BIT_NEC, }, .num_device_descs = 1, .devices = { { "DViCO FusionHDTV DVB-T NANO2", { NULL }, { &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_bluebird_nano2_needsfirmware_properties = { .caps = 
DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-bluebird-02.fw", .download_firmware = bluebird_patch_dvico_firmware_download, .identify_state = bluebird_fx2_identify_state, .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_nano2_frontend_attach, .tuner_attach = cxusb_dvico_xc3028_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x02, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_nano2_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_DVICO_PORTABLE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, .allowed_protos = RC_PROTO_BIT_NEC, }, .num_device_descs = 1, .devices = { { "DViCO FusionHDTV DVB-T NANO2 w/o firmware", { &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2], NULL }, { &cxusb_table[DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_aver_a868r_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_aver_streaming_ctrl, .frontend_attach = cxusb_aver_lgdt3303_frontend_attach, .tuner_attach = cxusb_mxl5003s_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x04, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_aver_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "AVerMedia AVerTVHD Volar (A868R)", { NULL }, { &cxusb_table[AVERMEDIA_VOLAR_A868R], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_bluebird_dualdig4_rev2_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .size_of_priv = sizeof(struct dib0700_adapter_state), .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_streaming_ctrl, .frontend_attach = cxusb_dualdig4_rev2_frontend_attach, .tuner_attach = cxusb_dualdig4_rev2_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x02, .u = { .bulk = { .buffersize = 4096, } } }, } }, }, }, .power_ctrl = cxusb_bluebird_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_DVICO_MCE, .module_name = KBUILD_MODNAME, .rc_query = cxusb_rc_query, .allowed_protos = RC_PROTO_BIT_NEC, }, .num_device_descs = 1, .devices = { { "DViCO FusionHDTV DVB-T Dual Digital 4 (rev 2)", { NULL }, { &cxusb_table[DVICO_BLUEBIRD_DUAL_4_REV_2], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_d680_dmb_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_d680_dmb_streaming_ctrl, .frontend_attach = cxusb_d680_dmb_frontend_attach, .tuner_attach = cxusb_d680_dmb_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x02, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_d680_dmb_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, 
.rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_TOTAL_MEDIA_IN_HAND_02, .module_name = KBUILD_MODNAME, .rc_query = cxusb_d680_dmb_rc_query, .allowed_protos = RC_PROTO_BIT_UNKNOWN, }, .num_device_descs = 1, .devices = { { "Conexant DMB-TH Stick", { NULL }, { &cxusb_table[CONEXANT_D680_DMB], NULL }, }, } }; static struct dvb_usb_device_properties cxusb_mygica_d689_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .size_of_priv = sizeof(struct cxusb_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .streaming_ctrl = cxusb_d680_dmb_streaming_ctrl, .frontend_attach = cxusb_mygica_d689_frontend_attach, .tuner_attach = cxusb_mygica_d689_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 5, .endpoint = 0x02, .u = { .bulk = { .buffersize = 8192, } } }, } }, }, }, .power_ctrl = cxusb_d680_dmb_power_ctrl, .i2c_algo = &cxusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .rc.core = { .rc_interval = 100, .rc_codes = RC_MAP_D680_DMB, .module_name = KBUILD_MODNAME, .rc_query = cxusb_d680_dmb_rc_query, .allowed_protos = RC_PROTO_BIT_UNKNOWN, }, .num_device_descs = 1, .devices = { { "Mygica D689 DMB-TH", { NULL }, { &cxusb_table[MYGICA_D689], NULL }, }, } }; static struct usb_driver cxusb_driver = { .name = "dvb_usb_cxusb", .probe = cxusb_probe, .disconnect = cxusb_disconnect, .id_table = cxusb_table, }; module_usb_driver(cxusb_driver); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>"); MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>"); MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>"); MODULE_DESCRIPTION("Driver for Conexant USB2.0 hybrid reference design"); MODULE_LICENSE("GPL");
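The cxusb_probe() callback referenced by cxusb_driver above is defined elsewhere in this file and is not part of this excerpt. As a hedged sketch of how the dvb-usb framework consumes the property tables listed above, a probe callback typically just offers the USB interface to each table in turn until one of them accepts it; the function name, the adapter_nr array (normally provided by DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr)), and the particular selection of tables below are illustrative assumptions, not the driver's actual code.

/*
 * Hedged sketch only: offer the interface to each known property table.
 * dvb_usb_device_init() returns 0 once it has claimed the interface.
 */
static int cxusb_probe_sketch(struct usb_interface *intf,
			      const struct usb_device_id *id)
{
	if (!dvb_usb_device_init(intf, &cxusb_bluebird_nano2_properties,
				 THIS_MODULE, NULL, adapter_nr) ||
	    !dvb_usb_device_init(intf, &cxusb_d680_dmb_properties,
				 THIS_MODULE, NULL, adapter_nr) ||
	    !dvb_usb_device_init(intf, &cxusb_mygica_d689_properties,
				 THIS_MODULE, NULL, adapter_nr))
		return 0;	/* one of the tables matched this device */

	return -EINVAL;
}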
// SPDX-License-Identifier: GPL-2.0-only /* * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> */ #include <linux/mISDNif.h> #include <linux/slab.h> #include
<linux/export.h> #include "core.h" static u_int *debug; static struct proto mISDN_proto = { .name = "misdn", .owner = THIS_MODULE, .obj_size = sizeof(struct mISDN_sock) }; #define _pms(sk) ((struct mISDN_sock *)sk) static struct mISDN_sock_list data_sockets = { .lock = __RW_LOCK_UNLOCKED(data_sockets.lock) }; static struct mISDN_sock_list base_sockets = { .lock = __RW_LOCK_UNLOCKED(base_sockets.lock) }; #define L2_HEADER_LEN 4 static inline struct sk_buff * _l2_alloc_skb(unsigned int len, gfp_t gfp_mask) { struct sk_buff *skb; skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask); if (likely(skb)) skb_reserve(skb, L2_HEADER_LEN); return skb; } static void mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_add_node(sk, &l->head); write_unlock_bh(&l->lock); } static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_del_node_init(sk); write_unlock_bh(&l->lock); } static int mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb) { struct mISDN_sock *msk; int err; msk = container_of(ch, struct mISDN_sock, ch); if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb); if (msk->sk.sk_state == MISDN_CLOSED) return -EUNATCH; __net_timestamp(skb); err = sock_queue_rcv_skb(&msk->sk, skb); if (err) printk(KERN_WARNING "%s: error %d\n", __func__, err); return err; } static int mISDN_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct mISDN_sock *msk; msk = container_of(ch, struct mISDN_sock, ch); if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p, %x, %p)\n", __func__, ch, cmd, arg); switch (cmd) { case CLOSE_CHANNEL: msk->sk.sk_state = MISDN_CLOSED; break; } return 0; } static inline void mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { struct __kernel_old_timeval tv; if (_pms(sk)->cmask & MISDN_TIME_STAMP) { skb_get_timestamp(skb, &tv); put_cmsg(msg, SOL_MISDN, MISDN_TIME_STAMP, sizeof(tv), &tv); } } static int mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sk_buff *skb; struct sock *sk = sock->sk; int copied, err; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n", __func__, (int)len, flags, _pms(sk)->ch.nr, sk->sk_protocol); if (flags & (MSG_OOB)) return -EOPNOTSUPP; if (sk->sk_state == MISDN_CLOSED) return 0; skb = skb_recv_datagram(sk, flags, &err); if (!skb) return err; if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name); maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; if ((sk->sk_protocol == ISDN_P_LAPD_TE) || (sk->sk_protocol == ISDN_P_LAPD_NT)) { maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff; maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff; maddr->sapi = mISDN_HEAD_ID(skb) & 0xff; } else { maddr->channel = _pms(sk)->ch.nr; maddr->sapi = _pms(sk)->ch.addr & 0xFF; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF; } msg->msg_namelen = sizeof(*maddr); } copied = skb->len + MISDN_HEADER_LEN; if (len < copied) { if (flags & MSG_PEEK) refcount_dec(&skb->users); else skb_queue_head(&sk->sk_receive_queue, skb); return -ENOSPC; } memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb), MISDN_HEADER_LEN); err = skb_copy_datagram_msg(skb, 0, msg, copied); mISDN_sock_cmsg(sk, msg, skb); skb_free_datagram(sk, skb); return err ? 
: copied; } static int mISDN_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb; int err = -ENOMEM; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n", __func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr, sk->sk_protocol); if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE)) return -EINVAL; if (len < MISDN_HEADER_LEN) return -EINVAL; if (sk->sk_state != MISDN_BOUND) return -EBADFD; lock_sock(sk); skb = _l2_alloc_skb(len, GFP_KERNEL); if (!skb) goto done; if (memcpy_from_msg(skb_put(skb, len), msg, len)) { err = -EFAULT; goto done; } memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN); skb_pull(skb, MISDN_HEADER_LEN); if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) { /* if we have a address, we use it */ DECLARE_SOCKADDR(struct sockaddr_mISDN *, maddr, msg->msg_name); mISDN_HEAD_ID(skb) = maddr->channel; } else { /* use default for L2 messages */ if ((sk->sk_protocol == ISDN_P_LAPD_TE) || (sk->sk_protocol == ISDN_P_LAPD_NT)) mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr; } if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s: ID:%x\n", __func__, mISDN_HEAD_ID(skb)); err = -ENODEV; if (!_pms(sk)->ch.peer) goto done; err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb); if (err) goto done; else { skb = NULL; err = len; } done: kfree_skb(skb); release_sock(sk); return err; } static int data_sock_release(struct socket *sock) { struct sock *sk = sock->sk; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (!sk) return 0; switch (sk->sk_protocol) { case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: if (sk->sk_state == MISDN_BOUND) delete_channel(&_pms(sk)->ch); else mISDN_sock_unlink(&data_sockets, sk); break; case ISDN_P_LAPD_TE: case ISDN_P_LAPD_NT: case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: delete_channel(&_pms(sk)->ch); mISDN_sock_unlink(&data_sockets, sk); break; } lock_sock(sk); sock_orphan(sk); skb_queue_purge(&sk->sk_receive_queue); release_sock(sk); sock_put(sk); return 0; } static int data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p) { struct mISDN_ctrl_req cq; int err = -EINVAL, val[2]; struct mISDNchannel *bchan, *next; lock_sock(sk); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } switch (cmd) { case IMCTRLREQ: if (copy_from_user(&cq, p, sizeof(cq))) { err = -EFAULT; break; } if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) { list_for_each_entry_safe(bchan, next, &_pms(sk)->dev->bchannels, list) { if (bchan->nr == cq.channel) { err = bchan->ctrl(bchan, CONTROL_CHANNEL, &cq); break; } } } else err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D, CONTROL_CHANNEL, &cq); if (err) break; if (copy_to_user(p, &cq, sizeof(cq))) err = -EFAULT; break; case IMCLEAR_L2: if (sk->sk_protocol != ISDN_P_LAPD_NT) { err = -EINVAL; break; } val[0] = cmd; if (get_user(val[1], (int __user *)p)) { err = -EFAULT; break; } err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr, CONTROL_CHANNEL, val); break; case IMHOLD_L1: if (sk->sk_protocol != ISDN_P_LAPD_NT && sk->sk_protocol != ISDN_P_LAPD_TE) { err = -EINVAL; break; } val[0] = cmd; if (get_user(val[1], (int __user *)p)) { err = -EFAULT; break; } err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr, CONTROL_CHANNEL, val); break; default: err = -EINVAL; break; } done: release_sock(sk); return err; } static int 
data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int err = 0, id; struct sock *sk = sock->sk; struct mISDNdevice *dev; struct mISDNversion ver; switch (cmd) { case IMGETVERSION: ver.major = MISDN_MAJOR_VERSION; ver.minor = MISDN_MINOR_VERSION; ver.release = MISDN_RELEASE; if (copy_to_user((void __user *)arg, &ver, sizeof(ver))) err = -EFAULT; break; case IMGETCOUNT: id = get_mdevice_count(); if (put_user(id, (int __user *)arg)) err = -EFAULT; break; case IMGETDEVINFO: if (get_user(id, (int __user *)arg)) { err = -EFAULT; break; } dev = get_mdevice(id); if (dev) { struct mISDN_devinfo di; memset(&di, 0, sizeof(di)); di.id = dev->id; di.Dprotocols = dev->Dprotocols; di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); di.protocol = dev->D.protocol; memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else err = -ENODEV; break; default: if (sk->sk_state == MISDN_BOUND) err = data_sock_ioctl_bound(sk, cmd, (void __user *)arg); else err = -ENOTCONN; } return err; } static int data_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0, opt = 0; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock, level, optname, optlen); lock_sock(sk); switch (optname) { case MISDN_TIME_STAMP: err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt) _pms(sk)->cmask |= MISDN_TIME_STAMP; else _pms(sk)->cmask &= ~MISDN_TIME_STAMP; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int data_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, opt; if (get_user(len, optlen)) return -EFAULT; if (len != sizeof(char)) return -EINVAL; switch (optname) { case MISDN_TIME_STAMP: if (_pms(sk)->cmask & MISDN_TIME_STAMP) opt = 1; else opt = 0; if (put_user(opt, optval)) return -EFAULT; break; default: return -ENOPROTOOPT; } return 0; } static int data_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; struct sock *csk; int err = 0; if (*debug & DEBUG_SOCKET) printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (addr_len != sizeof(struct sockaddr_mISDN)) return -EINVAL; if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); if (_pms(sk)->dev) { err = -EALREADY; goto done; } _pms(sk)->dev = get_mdevice(maddr->dev); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } if (sk->sk_protocol < ISDN_P_B_START) { read_lock_bh(&data_sockets.lock); sk_for_each(csk, &data_sockets.head) { if (sk == csk) continue; if (_pms(csk)->dev != _pms(sk)->dev) continue; if (csk->sk_protocol >= ISDN_P_B_START) continue; if (IS_ISDN_P_TE(csk->sk_protocol) == IS_ISDN_P_TE(sk->sk_protocol)) continue; read_unlock_bh(&data_sockets.lock); err = -EBUSY; goto done; } read_unlock_bh(&data_sockets.lock); } _pms(sk)->ch.send = mISDN_send; _pms(sk)->ch.ctrl = mISDN_ctrl; switch (sk->sk_protocol) { case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: mISDN_sock_unlink(&data_sockets, sk); err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); if (err) mISDN_sock_link(&data_sockets, sk); break; case ISDN_P_LAPD_TE: case 
ISDN_P_LAPD_NT: err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); break; case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch, sk->sk_protocol, maddr); break; default: err = -EPROTONOSUPPORT; } if (err) goto done; sk->sk_state = MISDN_BOUND; _pms(sk)->ch.protocol = sk->sk_protocol; done: release_sock(sk); return err; } static int data_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; if (!_pms(sk)->dev) return -EBADFD; lock_sock(sk); maddr->family = AF_ISDN; maddr->dev = _pms(sk)->dev->id; maddr->channel = _pms(sk)->ch.nr; maddr->sapi = _pms(sk)->ch.addr & 0xff; maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff; release_sock(sk); return sizeof(*maddr); } static const struct proto_ops data_sock_ops = { .family = PF_ISDN, .owner = THIS_MODULE, .release = data_sock_release, .ioctl = data_sock_ioctl, .bind = data_sock_bind, .getname = data_sock_getname, .sendmsg = mISDN_sock_sendmsg, .recvmsg = mISDN_sock_recvmsg, .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = data_sock_setsockopt, .getsockopt = data_sock_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static int data_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &data_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = MISDN_OPEN; mISDN_sock_link(&data_sockets, sk); return 0; } static int base_sock_release(struct socket *sock) { struct sock *sk = sock->sk; printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk); if (!sk) return 0; mISDN_sock_unlink(&base_sockets, sk); sock_orphan(sk); sock_put(sk); return 0; } static int base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int err = 0, id; struct mISDNdevice *dev; struct mISDNversion ver; switch (cmd) { case IMGETVERSION: ver.major = MISDN_MAJOR_VERSION; ver.minor = MISDN_MINOR_VERSION; ver.release = MISDN_RELEASE; if (copy_to_user((void __user *)arg, &ver, sizeof(ver))) err = -EFAULT; break; case IMGETCOUNT: id = get_mdevice_count(); if (put_user(id, (int __user *)arg)) err = -EFAULT; break; case IMGETDEVINFO: if (get_user(id, (int __user *)arg)) { err = -EFAULT; break; } dev = get_mdevice(id); if (dev) { struct mISDN_devinfo di; memset(&di, 0, sizeof(di)); di.id = dev->id; di.Dprotocols = dev->Dprotocols; di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); di.protocol = dev->D.protocol; memcpy(di.channelmap, dev->channelmap, sizeof(di.channelmap)); di.nrbchan = dev->nrbchan; strscpy(di.name, dev_name(&dev->dev), sizeof(di.name)); if (copy_to_user((void __user *)arg, &di, sizeof(di))) err = -EFAULT; } else err = -ENODEV; break; case IMSETDEVNAME: { struct mISDN_devrename dn; if (copy_from_user(&dn, (void __user *)arg, sizeof(dn))) { err = -EFAULT; break; } dn.name[sizeof(dn.name) - 1] = '\0'; dev = get_mdevice(dn.id); if (dev) err = device_rename(&dev->dev, dn.name); else err = -ENODEV; } break; default: err = -EINVAL; } return err; } static int base_sock_bind(struct socket *sock, struct sockaddr_unsized 
*addr, int addr_len) { struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; struct sock *sk = sock->sk; int err = 0; if (addr_len < sizeof(struct sockaddr_mISDN)) return -EINVAL; if (!maddr || maddr->family != AF_ISDN) return -EINVAL; lock_sock(sk); if (_pms(sk)->dev) { err = -EALREADY; goto done; } _pms(sk)->dev = get_mdevice(maddr->dev); if (!_pms(sk)->dev) { err = -ENODEV; goto done; } sk->sk_state = MISDN_BOUND; done: release_sock(sk); return err; } static const struct proto_ops base_sock_ops = { .family = PF_ISDN, .owner = THIS_MODULE, .release = base_sock_release, .ioctl = base_sock_ioctl, .bind = base_sock_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static int base_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; if (!capable(CAP_NET_RAW)) return -EPERM; sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &base_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = MISDN_OPEN; mISDN_sock_link(&base_sockets, sk); return 0; } static int mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern) { int err = -EPROTONOSUPPORT; switch (proto) { case ISDN_P_BASE: err = base_sock_create(net, sock, proto, kern); break; case ISDN_P_TE_S0: case ISDN_P_NT_S0: case ISDN_P_TE_E1: case ISDN_P_NT_E1: case ISDN_P_LAPD_TE: case ISDN_P_LAPD_NT: case ISDN_P_B_RAW: case ISDN_P_B_HDLC: case ISDN_P_B_X75SLP: case ISDN_P_B_L2DTMF: case ISDN_P_B_L2DSP: case ISDN_P_B_L2DSPHDLC: err = data_sock_create(net, sock, proto, kern); break; default: return err; } return err; } static const struct net_proto_family mISDN_sock_family_ops = { .owner = THIS_MODULE, .family = PF_ISDN, .create = mISDN_sock_create, }; int misdn_sock_init(u_int *deb) { int err; debug = deb; err = sock_register(&mISDN_sock_family_ops); if (err) printk(KERN_ERR "%s: error(%d)\n", __func__, err); return err; } void misdn_sock_cleanup(void) { sock_unregister(PF_ISDN); }
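The file above only registers and exports the PF_ISDN socket family; for context, a minimal user-space sketch of how it is typically consumed follows. It assumes the uapi header <linux/mISDNif.h> and CAP_NET_RAW (required by base_sock_create() for the SOCK_RAW base socket); device id 0 and the ISDN_P_LAPD_TE protocol are arbitrary example choices, and the bind simply fails with an error if no matching mISDN device is present.

/* Hedged user-space sketch, not part of the kernel sources above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/mISDNif.h>

int main(void)
{
	struct sockaddr_mISDN addr = { .family = AF_ISDN, .dev = 0 };
	struct mISDNversion ver;
	int base, lapd, devcnt;

	/* Base socket: SOCK_RAW + ISDN_P_BASE, ioctls served by base_sock_ioctl() */
	base = socket(PF_ISDN, SOCK_RAW, ISDN_P_BASE);
	if (base < 0)
		return 1;
	if (!ioctl(base, IMGETVERSION, &ver))
		printf("mISDN %d.%d.%d\n", ver.major, ver.minor, ver.release);
	if (!ioctl(base, IMGETCOUNT, &devcnt))
		printf("%d mISDN device(s)\n", devcnt);

	/* Data socket: SOCK_DGRAM, bound to a device via data_sock_bind() */
	lapd = socket(PF_ISDN, SOCK_DGRAM, ISDN_P_LAPD_TE);
	if (lapd >= 0) {
		if (bind(lapd, (struct sockaddr *)&addr, sizeof(addr)))
			perror("bind");
		close(lapd);
	}
	close(base);
	return 0;
}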
// SPDX-License-Identifier: GPL-2.0-or-later /* * zswap.c - zswap driver file * * zswap is a cache that takes pages that are in the process * of being swapped out and attempts to compress and store them in a * RAM-based memory pool. This can result in a significant I/O reduction on * the swap device and, in the case where decompressing from RAM is faster * than reading from the swap device, can also improve workload performance. * * Copyright (C) 2012 Seth Jennings <sjenning@linux.vnet.ibm.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/cpu.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/atomic.h> #include <linux/swap.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/mempolicy.h> #include <linux/mempool.h> #include <crypto/acompress.h> #include <crypto/scatterwalk.h> #include <linux/zswap.h> #include <linux/mm_types.h> #include <linux/page-flags.h> #include <linux/swapops.h> #include <linux/writeback.h> #include <linux/pagemap.h> #include <linux/workqueue.h> #include <linux/list_lru.h> #include <linux/zsmalloc.h> #include "swap.h" #include "internal.h" /********************************* * statistics **********************************/ /* The number of pages currently stored in zswap */ atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0); /* The number of incompressible pages currently stored in zswap */ static atomic_long_t zswap_stored_incompressible_pages = ATOMIC_LONG_INIT(0); /* * The statistics below are not protected from concurrent access for * performance reasons so they may not be a 100% accurate. However, * they do provide useful information on roughly how many times a * certain event is occurring.
*/ /* Pool limit was hit (see zswap_max_pool_percent) */ static u64 zswap_pool_limit_hit; /* Pages written back when pool limit was reached */ static u64 zswap_written_back_pages; /* Store failed due to a reclaim failure after pool limit was reached */ static u64 zswap_reject_reclaim_fail; /* Store failed due to compression algorithm failure */ static u64 zswap_reject_compress_fail; /* Compressed page was too big for the allocator to (optimally) store */ static u64 zswap_reject_compress_poor; /* Load or writeback failed due to decompression failure */ static u64 zswap_decompress_fail; /* Store failed because underlying allocator could not get memory */ static u64 zswap_reject_alloc_fail; /* Store failed because the entry metadata could not be allocated (rare) */ static u64 zswap_reject_kmemcache_fail; /* Shrinker work queue */ static struct workqueue_struct *shrink_wq; /* Pool limit was hit, we need to calm down */ static bool zswap_pool_reached_full; /********************************* * tunables **********************************/ #define ZSWAP_PARAM_UNSET "" static int zswap_setup(void); /* Enable/disable zswap */ static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled); static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON); static int zswap_enabled_param_set(const char *, const struct kernel_param *); static const struct kernel_param_ops zswap_enabled_param_ops = { .set = zswap_enabled_param_set, .get = param_get_bool, }; module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644); /* Crypto compressor to use */ static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT; static int zswap_compressor_param_set(const char *, const struct kernel_param *); static const struct kernel_param_ops zswap_compressor_param_ops = { .set = zswap_compressor_param_set, .get = param_get_charp, .free = param_free_charp, }; module_param_cb(compressor, &zswap_compressor_param_ops, &zswap_compressor, 0644); /* The maximum percentage of memory that the compressed pool can occupy */ static unsigned int zswap_max_pool_percent = 20; module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644); /* The threshold for accepting new pages after the max_pool_percent was hit */ static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */ module_param_named(accept_threshold_percent, zswap_accept_thr_percent, uint, 0644); /* Enable/disable memory pressure-based shrinker. */ static bool zswap_shrinker_enabled = IS_ENABLED( CONFIG_ZSWAP_SHRINKER_DEFAULT_ON); module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644); bool zswap_is_enabled(void) { return zswap_enabled; } bool zswap_never_enabled(void) { return !static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled); } /********************************* * data structures **********************************/ struct crypto_acomp_ctx { struct crypto_acomp *acomp; struct acomp_req *req; struct crypto_wait wait; u8 *buffer; struct mutex mutex; }; /* * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock. * The only case where lru_lock is not acquired while holding tree.lock is * when a zswap_entry is taken off the lru for writeback, in that case it * needs to be verified that it's still valid in the tree. 
*/ struct zswap_pool { struct zs_pool *zs_pool; struct crypto_acomp_ctx __percpu *acomp_ctx; struct percpu_ref ref; struct list_head list; struct work_struct release_work; struct hlist_node node; char tfm_name[CRYPTO_MAX_ALG_NAME]; }; /* Global LRU lists shared by all zswap pools. */ static struct list_lru zswap_list_lru; /* The lock protects zswap_next_shrink updates. */ static DEFINE_SPINLOCK(zswap_shrink_lock); static struct mem_cgroup *zswap_next_shrink; static struct work_struct zswap_shrink_work; static struct shrinker *zswap_shrinker; /* * struct zswap_entry * * This structure contains the metadata for tracking a single compressed * page within zswap. * * swpentry - associated swap entry, the offset indexes into the xarray * length - the length in bytes of the compressed page data. Needed during * decompression. * referenced - true if the entry recently entered the zswap pool. Unset by the * writeback logic. The entry is only reclaimed by the writeback * logic if referenced is unset. See comments in the shrinker * section for context. * pool - the zswap_pool the entry's data is in * handle - zsmalloc allocation handle that stores the compressed page data * objcg - the obj_cgroup that the compressed memory is charged to * lru - handle to the pool's lru used to evict pages. */ struct zswap_entry { swp_entry_t swpentry; unsigned int length; bool referenced; struct zswap_pool *pool; unsigned long handle; struct obj_cgroup *objcg; struct list_head lru; }; static struct xarray *zswap_trees[MAX_SWAPFILES]; static unsigned int nr_zswap_trees[MAX_SWAPFILES]; /* RCU-protected iteration */ static LIST_HEAD(zswap_pools); /* protects zswap_pools list modification */ static DEFINE_SPINLOCK(zswap_pools_lock); /* pool counter to provide unique names to zsmalloc */ static atomic_t zswap_pools_count = ATOMIC_INIT(0); enum zswap_init_type { ZSWAP_UNINIT, ZSWAP_INIT_SUCCEED, ZSWAP_INIT_FAILED }; static enum zswap_init_type zswap_init_state; /* used to ensure the integrity of initialization */ static DEFINE_MUTEX(zswap_init_lock); /* init completed, but couldn't create the initial pool */ static bool zswap_has_pool; /********************************* * helpers and fwd declarations **********************************/ /* One swap address space for each 64M swap space */ #define ZSWAP_ADDRESS_SPACE_SHIFT 14 #define ZSWAP_ADDRESS_SPACE_PAGES (1 << ZSWAP_ADDRESS_SPACE_SHIFT) static inline struct xarray *swap_zswap_tree(swp_entry_t swp) { return &zswap_trees[swp_type(swp)][swp_offset(swp) >> ZSWAP_ADDRESS_SPACE_SHIFT]; } #define zswap_pool_debug(msg, p) \ pr_debug("%s pool %s\n", msg, (p)->tfm_name) /********************************* * pool functions **********************************/ static void __zswap_pool_empty(struct percpu_ref *ref); static struct zswap_pool *zswap_pool_create(char *compressor) { struct zswap_pool *pool; char name[38]; /* 'zswap' + 32 char (max) num + \0 */ int ret, cpu; if (!zswap_has_pool && !strcmp(compressor, ZSWAP_PARAM_UNSET)) return NULL; pool = kzalloc_obj(*pool); if (!pool) return NULL; /* unique name for each pool specifically required by zsmalloc */ snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); pool->zs_pool = zs_create_pool(name); if (!pool->zs_pool) goto error; strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name)); pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx); if (!pool->acomp_ctx) { pr_err("percpu alloc failed\n"); goto error; } for_each_possible_cpu(cpu) mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex); ret = 
cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node); if (ret) goto error; /* being the current pool takes 1 ref; this func expects the * caller to always add the new pool as the current pool */ ret = percpu_ref_init(&pool->ref, __zswap_pool_empty, PERCPU_REF_ALLOW_REINIT, GFP_KERNEL); if (ret) goto ref_fail; INIT_LIST_HEAD(&pool->list); zswap_pool_debug("created", pool); return pool; ref_fail: cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node); error: if (pool->acomp_ctx) free_percpu(pool->acomp_ctx); if (pool->zs_pool) zs_destroy_pool(pool->zs_pool); kfree(pool); return NULL; } static struct zswap_pool *__zswap_pool_create_fallback(void) { if (!crypto_has_acomp(zswap_compressor, 0, 0) && strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) { pr_err("compressor %s not available, using default %s\n", zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT); param_free_charp(&zswap_compressor); zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT; } /* Default compressor should be available. Kconfig bug? */ if (WARN_ON_ONCE(!crypto_has_acomp(zswap_compressor, 0, 0))) { zswap_compressor = ZSWAP_PARAM_UNSET; return NULL; } return zswap_pool_create(zswap_compressor); } static void zswap_pool_destroy(struct zswap_pool *pool) { zswap_pool_debug("destroying", pool); cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node); free_percpu(pool->acomp_ctx); zs_destroy_pool(pool->zs_pool); kfree(pool); } static void __zswap_pool_release(struct work_struct *work) { struct zswap_pool *pool = container_of(work, typeof(*pool), release_work); synchronize_rcu(); /* nobody should have been able to get a ref... */ WARN_ON(!percpu_ref_is_zero(&pool->ref)); percpu_ref_exit(&pool->ref); /* pool is now off zswap_pools list and has no references. */ zswap_pool_destroy(pool); } static struct zswap_pool *zswap_pool_current(void); static void __zswap_pool_empty(struct percpu_ref *ref) { struct zswap_pool *pool; pool = container_of(ref, typeof(*pool), ref); spin_lock_bh(&zswap_pools_lock); WARN_ON(pool == zswap_pool_current()); list_del_rcu(&pool->list); INIT_WORK(&pool->release_work, __zswap_pool_release); schedule_work(&pool->release_work); spin_unlock_bh(&zswap_pools_lock); } static int __must_check zswap_pool_tryget(struct zswap_pool *pool) { if (!pool) return 0; return percpu_ref_tryget(&pool->ref); } /* The caller must already have a reference. 
*/ static void zswap_pool_get(struct zswap_pool *pool) { percpu_ref_get(&pool->ref); } static void zswap_pool_put(struct zswap_pool *pool) { percpu_ref_put(&pool->ref); } static struct zswap_pool *__zswap_pool_current(void) { struct zswap_pool *pool; pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list); WARN_ONCE(!pool && zswap_has_pool, "%s: no page storage pool!\n", __func__); return pool; } static struct zswap_pool *zswap_pool_current(void) { assert_spin_locked(&zswap_pools_lock); return __zswap_pool_current(); } static struct zswap_pool *zswap_pool_current_get(void) { struct zswap_pool *pool; rcu_read_lock(); pool = __zswap_pool_current(); if (!zswap_pool_tryget(pool)) pool = NULL; rcu_read_unlock(); return pool; } /* type and compressor must be null-terminated */ static struct zswap_pool *zswap_pool_find_get(char *compressor) { struct zswap_pool *pool; assert_spin_locked(&zswap_pools_lock); list_for_each_entry_rcu(pool, &zswap_pools, list) { if (strcmp(pool->tfm_name, compressor)) continue; /* if we can't get it, it's about to be destroyed */ if (!zswap_pool_tryget(pool)) continue; return pool; } return NULL; } static unsigned long zswap_max_pages(void) { return totalram_pages() * zswap_max_pool_percent / 100; } static unsigned long zswap_accept_thr_pages(void) { return zswap_max_pages() * zswap_accept_thr_percent / 100; } unsigned long zswap_total_pages(void) { struct zswap_pool *pool; unsigned long total = 0; rcu_read_lock(); list_for_each_entry_rcu(pool, &zswap_pools, list) total += zs_get_total_pages(pool->zs_pool); rcu_read_unlock(); return total; } static bool zswap_check_limits(void) { unsigned long cur_pages = zswap_total_pages(); unsigned long max_pages = zswap_max_pages(); if (cur_pages >= max_pages) { zswap_pool_limit_hit++; zswap_pool_reached_full = true; } else if (zswap_pool_reached_full && cur_pages <= zswap_accept_thr_pages()) { zswap_pool_reached_full = false; } return zswap_pool_reached_full; } /********************************* * param callbacks **********************************/ static int zswap_compressor_param_set(const char *val, const struct kernel_param *kp) { struct zswap_pool *pool, *put_pool = NULL; char *s = strstrip((char *)val); bool create_pool = false; int ret = 0; mutex_lock(&zswap_init_lock); switch (zswap_init_state) { case ZSWAP_UNINIT: /* Handled in zswap_setup() */ ret = param_set_charp(s, kp); break; case ZSWAP_INIT_SUCCEED: if (!zswap_has_pool || strcmp(s, *(char **)kp->arg)) create_pool = true; break; case ZSWAP_INIT_FAILED: pr_err("can't set param, initialization failed\n"); ret = -ENODEV; } mutex_unlock(&zswap_init_lock); if (!create_pool) return ret; if (!crypto_has_acomp(s, 0, 0)) { pr_err("compressor %s not available\n", s); return -ENOENT; } spin_lock_bh(&zswap_pools_lock); pool = zswap_pool_find_get(s); if (pool) { zswap_pool_debug("using existing", pool); WARN_ON(pool == zswap_pool_current()); list_del_rcu(&pool->list); } spin_unlock_bh(&zswap_pools_lock); if (!pool) pool = zswap_pool_create(s); else { /* * Restore the initial ref dropped by percpu_ref_kill() * when the pool was decommissioned and switch it again * to percpu mode. */ percpu_ref_resurrect(&pool->ref); /* Drop the ref from zswap_pool_find_get(). 
*/ zswap_pool_put(pool); } if (pool) ret = param_set_charp(s, kp); else ret = -EINVAL; spin_lock_bh(&zswap_pools_lock); if (!ret) { put_pool = zswap_pool_current(); list_add_rcu(&pool->list, &zswap_pools); zswap_has_pool = true; } else if (pool) { /* * Add the possibly pre-existing pool to the end of the pools * list; if it's new (and empty) then it'll be removed and * destroyed by the put after we drop the lock */ list_add_tail_rcu(&pool->list, &zswap_pools); put_pool = pool; } spin_unlock_bh(&zswap_pools_lock); /* * Drop the ref from either the old current pool, * or the new pool we failed to add */ if (put_pool) percpu_ref_kill(&put_pool->ref); return ret; } static int zswap_enabled_param_set(const char *val, const struct kernel_param *kp) { int ret = -ENODEV; /* if this is load-time (pre-init) param setting, only set param. */ if (system_state != SYSTEM_RUNNING) return param_set_bool(val, kp); mutex_lock(&zswap_init_lock); switch (zswap_init_state) { case ZSWAP_UNINIT: if (zswap_setup()) break; fallthrough; case ZSWAP_INIT_SUCCEED: if (!zswap_has_pool) pr_err("can't enable, no pool configured\n"); else ret = param_set_bool(val, kp); break; case ZSWAP_INIT_FAILED: pr_err("can't enable, initialization failed\n"); } mutex_unlock(&zswap_init_lock); return ret; } /********************************* * lru functions **********************************/ /* should be called under RCU */ #ifdef CONFIG_MEMCG static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry) { return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL; } #else static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry) { return NULL; } #endif static inline int entry_to_nid(struct zswap_entry *entry) { return page_to_nid(virt_to_page(entry)); } static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry) { int nid = entry_to_nid(entry); struct mem_cgroup *memcg; /* * Note that it is safe to use rcu_read_lock() here, even in the face of * concurrent memcg offlining: * * 1. list_lru_add() is called before list_lru_one is dead. The * new entry will be reparented to memcg's parent's list_lru. * 2. list_lru_add() is called after list_lru_one is dead. The * new entry will be added directly to memcg's parent's list_lru. * * Similar reasoning holds for list_lru_del(). */ rcu_read_lock(); memcg = mem_cgroup_from_entry(entry); /* will always succeed */ list_lru_add(list_lru, &entry->lru, nid, memcg); rcu_read_unlock(); } static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry) { int nid = entry_to_nid(entry); struct mem_cgroup *memcg; rcu_read_lock(); memcg = mem_cgroup_from_entry(entry); /* will always succeed */ list_lru_del(list_lru, &entry->lru, nid, memcg); rcu_read_unlock(); } void zswap_lruvec_state_init(struct lruvec *lruvec) { atomic_long_set(&lruvec->zswap_lruvec_state.nr_disk_swapins, 0); } void zswap_folio_swapin(struct folio *folio) { struct lruvec *lruvec; if (folio) { lruvec = folio_lruvec(folio); atomic_long_inc(&lruvec->zswap_lruvec_state.nr_disk_swapins); } } /* * This function should be called when a memcg is being offlined. * * Since the global shrinker shrink_worker() may hold a reference * of the memcg, we must check and release the reference in * zswap_next_shrink. * * shrink_worker() must handle the case where this function releases * the reference of memcg being shrunk. 
*/ void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) { /* lock out zswap shrinker walking memcg tree */ spin_lock(&zswap_shrink_lock); if (zswap_next_shrink == memcg) { do { zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL); } while (zswap_next_shrink && !mem_cgroup_online(zswap_next_shrink)); } spin_unlock(&zswap_shrink_lock); } /********************************* * zswap entry functions **********************************/ static struct kmem_cache *zswap_entry_cache; static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid) { struct zswap_entry *entry; entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid); if (!entry) return NULL; return entry; } static void zswap_entry_cache_free(struct zswap_entry *entry) { kmem_cache_free(zswap_entry_cache, entry); } /* * Carries out the common pattern of freeing an entry's zsmalloc allocation, * freeing the entry itself, and decrementing the number of stored pages. */ static void zswap_entry_free(struct zswap_entry *entry) { zswap_lru_del(&zswap_list_lru, entry); zs_free(entry->pool->zs_pool, entry->handle); zswap_pool_put(entry->pool); if (entry->objcg) { obj_cgroup_uncharge_zswap(entry->objcg, entry->length); obj_cgroup_put(entry->objcg); } if (entry->length == PAGE_SIZE) atomic_long_dec(&zswap_stored_incompressible_pages); zswap_entry_cache_free(entry); atomic_long_dec(&zswap_stored_pages); } /********************************* * compressed storage functions **********************************/ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node) { struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); struct crypto_acomp *acomp = NULL; struct acomp_req *req = NULL; u8 *buffer = NULL; int ret; buffer = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu)); if (!buffer) { ret = -ENOMEM; goto fail; } acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu)); if (IS_ERR(acomp)) { pr_err("could not alloc crypto acomp %s : %pe\n", pool->tfm_name, acomp); ret = PTR_ERR(acomp); goto fail; } req = acomp_request_alloc(acomp); if (!req) { pr_err("could not alloc crypto acomp_request %s\n", pool->tfm_name); ret = -ENOMEM; goto fail; } /* * Only hold the mutex after completing allocations, otherwise we may * recurse into zswap through reclaim and attempt to hold the mutex * again resulting in a deadlock. */ mutex_lock(&acomp_ctx->mutex); crypto_init_wait(&acomp_ctx->wait); /* * if the backend of acomp is async zip, crypto_req_done() will wakeup * crypto_wait_req(); if the backend of acomp is scomp, the callback * won't be called, crypto_wait_req() will return without blocking. 
*/ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &acomp_ctx->wait); acomp_ctx->buffer = buffer; acomp_ctx->acomp = acomp; acomp_ctx->req = req; mutex_unlock(&acomp_ctx->mutex); return 0; fail: if (!IS_ERR_OR_NULL(acomp)) crypto_free_acomp(acomp); kfree(buffer); return ret; } static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node) { struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); struct acomp_req *req; struct crypto_acomp *acomp; u8 *buffer; if (IS_ERR_OR_NULL(acomp_ctx)) return 0; mutex_lock(&acomp_ctx->mutex); req = acomp_ctx->req; acomp = acomp_ctx->acomp; buffer = acomp_ctx->buffer; acomp_ctx->req = NULL; acomp_ctx->acomp = NULL; acomp_ctx->buffer = NULL; mutex_unlock(&acomp_ctx->mutex); /* * Do the actual freeing after releasing the mutex to avoid subtle * locking dependencies causing deadlocks. */ if (!IS_ERR_OR_NULL(req)) acomp_request_free(req); if (!IS_ERR_OR_NULL(acomp)) crypto_free_acomp(acomp); kfree(buffer); return 0; } static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool) { struct crypto_acomp_ctx *acomp_ctx; for (;;) { acomp_ctx = raw_cpu_ptr(pool->acomp_ctx); mutex_lock(&acomp_ctx->mutex); if (likely(acomp_ctx->req)) return acomp_ctx; /* * It is possible that we were migrated to a different CPU after * getting the per-CPU ctx but before the mutex was acquired. If * the old CPU got offlined, zswap_cpu_comp_dead() could have * already freed ctx->req (among other things) and set it to * NULL. Just try again on the new CPU that we ended up on. */ mutex_unlock(&acomp_ctx->mutex); } } static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx) { mutex_unlock(&acomp_ctx->mutex); } static bool zswap_compress(struct page *page, struct zswap_entry *entry, struct zswap_pool *pool) { struct crypto_acomp_ctx *acomp_ctx; struct scatterlist input, output; int comp_ret = 0, alloc_ret = 0; unsigned int dlen = PAGE_SIZE; unsigned long handle; gfp_t gfp; u8 *dst; bool mapped = false; acomp_ctx = acomp_ctx_get_cpu_lock(pool); dst = acomp_ctx->buffer; sg_init_table(&input, 1); sg_set_page(&input, page, PAGE_SIZE, 0); sg_init_one(&output, dst, PAGE_SIZE); acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen); /* * it maybe looks a little bit silly that we send an asynchronous request, * then wait for its completion synchronously. This makes the process look * synchronous in fact. * Theoretically, acomp supports users send multiple acomp requests in one * acomp instance, then get those requests done simultaneously. but in this * case, zswap actually does store and load page by page, there is no * existing method to send the second page before the first page is done * in one thread doing zswap. * but in different threads running on different cpu, we have different * acomp instance, so multiple threads can do (de)compression in parallel. */ comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait); dlen = acomp_ctx->req->dlen; /* * If a page cannot be compressed into a size smaller than PAGE_SIZE, * save the content as is without a compression, to keep the LRU order * of writebacks. If writeback is disabled, reject the page since it * only adds metadata overhead. swap_writeout() will put the page back * to the active LRU list in the case. 
*/ if (comp_ret || !dlen || dlen >= PAGE_SIZE) { if (!mem_cgroup_zswap_writeback_enabled( folio_memcg(page_folio(page)))) { comp_ret = comp_ret ? comp_ret : -EINVAL; goto unlock; } comp_ret = 0; dlen = PAGE_SIZE; dst = kmap_local_page(page); mapped = true; } gfp = GFP_NOWAIT | __GFP_NORETRY | __GFP_HIGHMEM | __GFP_MOVABLE; handle = zs_malloc(pool->zs_pool, dlen, gfp, page_to_nid(page)); if (IS_ERR_VALUE(handle)) { alloc_ret = PTR_ERR((void *)handle); goto unlock; } zs_obj_write(pool->zs_pool, handle, dst, dlen); entry->handle = handle; entry->length = dlen; unlock: if (mapped) kunmap_local(dst); if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC) zswap_reject_compress_poor++; else if (comp_ret) zswap_reject_compress_fail++; else if (alloc_ret) zswap_reject_alloc_fail++; acomp_ctx_put_unlock(acomp_ctx); return comp_ret == 0 && alloc_ret == 0; } static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio) { struct zswap_pool *pool = entry->pool; struct scatterlist input[2]; /* zsmalloc returns an SG list 1-2 entries */ struct scatterlist output; struct crypto_acomp_ctx *acomp_ctx; int ret = 0, dlen; acomp_ctx = acomp_ctx_get_cpu_lock(pool); zs_obj_read_sg_begin(pool->zs_pool, entry->handle, input, entry->length); /* zswap entries of length PAGE_SIZE are not compressed. */ if (entry->length == PAGE_SIZE) { void *dst; WARN_ON_ONCE(input->length != PAGE_SIZE); dst = kmap_local_folio(folio, 0); memcpy_from_sglist(dst, input, 0, PAGE_SIZE); dlen = PAGE_SIZE; kunmap_local(dst); flush_dcache_folio(folio); } else { sg_init_table(&output, 1); sg_set_folio(&output, folio, PAGE_SIZE, 0); acomp_request_set_params(acomp_ctx->req, input, &output, entry->length, PAGE_SIZE); ret = crypto_acomp_decompress(acomp_ctx->req); ret = crypto_wait_req(ret, &acomp_ctx->wait); dlen = acomp_ctx->req->dlen; } zs_obj_read_sg_end(pool->zs_pool, entry->handle); acomp_ctx_put_unlock(acomp_ctx); if (!ret && dlen == PAGE_SIZE) return true; zswap_decompress_fail++; pr_alert_ratelimited("Decompression error from zswap (%d:%lu %s %u->%d)\n", swp_type(entry->swpentry), swp_offset(entry->swpentry), entry->pool->tfm_name, entry->length, dlen); return false; } /********************************* * writeback code **********************************/ /* * Attempts to free an entry by adding a folio to the swap cache, * decompressing the entry data into the folio, and issuing a * bio write to write the folio back to the swap device. * * This can be thought of as a "resumed writeback" of the folio * to the swap device. We are basically resuming the same swap * writeback path that was intercepted with the zswap_store() * in the first place. After the folio has been decompressed into * the swap cache, the compressed version stored by zswap can be * freed. */ static int zswap_writeback_entry(struct zswap_entry *entry, swp_entry_t swpentry) { struct xarray *tree; pgoff_t offset = swp_offset(swpentry); struct folio *folio; struct mempolicy *mpol; bool folio_was_allocated; struct swap_info_struct *si; int ret = 0; /* try to allocate swap cache folio */ si = get_swap_device(swpentry); if (!si) return -EEXIST; mpol = get_task_policy(current); folio = swap_cache_alloc_folio(swpentry, GFP_KERNEL, mpol, NO_INTERLEAVE_INDEX, &folio_was_allocated); put_swap_device(si); if (!folio) return -ENOMEM; /* * Found an existing folio, we raced with swapin or concurrent * shrinker. We generally writeback cold folios from zswap, and * swapin means the folio just became hot, so skip this folio. 
* For unlikely concurrent shrinker case, it will be unlinked * and freed when invalidated by the concurrent shrinker anyway. */ if (!folio_was_allocated) { ret = -EEXIST; goto out; } /* * folio is locked, and the swapcache is now secured against * concurrent swapping to and from the slot, and concurrent * swapoff so we can safely dereference the zswap tree here. * Verify that the swap entry hasn't been invalidated and recycled * behind our backs, to avoid overwriting a new swap folio with * old compressed data. Only when this is successful can the entry * be dereferenced. */ tree = swap_zswap_tree(swpentry); if (entry != xa_load(tree, offset)) { ret = -ENOMEM; goto out; } if (!zswap_decompress(entry, folio)) { ret = -EIO; goto out; } xa_erase(tree, offset); count_vm_event(ZSWPWB); if (entry->objcg) count_objcg_events(entry->objcg, ZSWPWB, 1); zswap_entry_free(entry); /* folio is up to date */ folio_mark_uptodate(folio); /* move it to the tail of the inactive list after end_writeback */ folio_set_reclaim(folio); /* start writeback */ __swap_writepage(folio, NULL); out: if (ret && ret != -EEXIST) { swap_cache_del_folio(folio); folio_unlock(folio); } folio_put(folio); return ret; } /********************************* * shrinker functions **********************************/ /* * The dynamic shrinker is modulated by the following factors: * * 1. Each zswap entry has a referenced bit, which the shrinker unsets (giving * the entry a second chance) before rotating it in the LRU list. If the * entry is considered again by the shrinker, with its referenced bit unset, * it is written back. The writeback rate as a result is dynamically * adjusted by the pool activities - if the pool is dominated by new entries * (i.e lots of recent zswapouts), these entries will be protected and * the writeback rate will slow down. On the other hand, if the pool has a * lot of stagnant entries, these entries will be reclaimed immediately, * effectively increasing the writeback rate. * * 2. Swapins counter: If we observe swapins, it is a sign that we are * overshrinking and should slow down. We maintain a swapins counter, which * is consumed and subtract from the number of eligible objects on the LRU * in zswap_shrinker_count(). * * 3. Compression ratio. The better the workload compresses, the less gains we * can expect from writeback. We scale down the number of objects available * for reclaim by this ratio. */ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l, void *arg) { struct zswap_entry *entry = container_of(item, struct zswap_entry, lru); bool *encountered_page_in_swapcache = (bool *)arg; swp_entry_t swpentry; enum lru_status ret = LRU_REMOVED_RETRY; int writeback_result; /* * Second chance algorithm: if the entry has its referenced bit set, give it * a second chance. Only clear the referenced bit and rotate it in the * zswap's LRU list. */ if (entry->referenced) { entry->referenced = false; return LRU_ROTATE; } /* * As soon as we drop the LRU lock, the entry can be freed by * a concurrent invalidation. This means the following: * * 1. We extract the swp_entry_t to the stack, allowing * zswap_writeback_entry() to pin the swap entry and * then validate the zswap entry against that swap entry's * tree using pointer value comparison. Only when that * is successful can the entry be dereferenced. * * 2. Usually, objects are taken off the LRU for reclaim. 
In * this case this isn't possible, because if reclaim fails * for whatever reason, we have no means of knowing if the * entry is alive to put it back on the LRU. * * So rotate it before dropping the lock. If the entry is * written back or invalidated, the free path will unlink * it. For failures, rotation is the right thing as well. * * Temporary failures, where the same entry should be tried * again immediately, almost never happen for this shrinker. * We don't do any trylocking; -ENOMEM comes closest, * but that's extremely rare and doesn't happen spuriously * either. Don't bother distinguishing this case. */ list_move_tail(item, &l->list); /* * Once the lru lock is dropped, the entry might get freed. The * swpentry is copied to the stack, and entry isn't deref'd again * until the entry is verified to still be alive in the tree. */ swpentry = entry->swpentry; /* * It's safe to drop the lock here because we return either * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP. */ spin_unlock(&l->lock); writeback_result = zswap_writeback_entry(entry, swpentry); if (writeback_result) { zswap_reject_reclaim_fail++; ret = LRU_RETRY; /* * Encountering a page already in swap cache is a sign that we are shrinking * into the warmer region. We should terminate shrinking (if we're in the dynamic * shrinker context). */ if (writeback_result == -EEXIST && encountered_page_in_swapcache) { ret = LRU_STOP; *encountered_page_in_swapcache = true; } } else { zswap_written_back_pages++; } return ret; } static unsigned long zswap_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) { unsigned long shrink_ret; bool encountered_page_in_swapcache = false; if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(sc->memcg)) { sc->nr_scanned = 0; return SHRINK_STOP; } shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb, &encountered_page_in_swapcache); if (encountered_page_in_swapcache) return SHRINK_STOP; return shrink_ret ? shrink_ret : SHRINK_STOP; } static unsigned long zswap_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct mem_cgroup *memcg = sc->memcg; struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid)); atomic_long_t *nr_disk_swapins = &lruvec->zswap_lruvec_state.nr_disk_swapins; unsigned long nr_backing, nr_stored, nr_freeable, nr_disk_swapins_cur, nr_remain; if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg)) return 0; /* * The shrinker resumes swap writeback, which will enter block * and may enter fs. XXX: Harmonize with vmscan.c __GFP_FS * rules (may_enter_fs()), which apply on a per-folio basis. */ if (!gfp_has_io_fs(sc->gfp_mask)) return 0; /* * For memcg, use the cgroup-wide ZSWAP stats since we don't * have them per-node and thus per-lruvec. Careful if memcg is * runtime-disabled: we can get sc->memcg == NULL, which is ok * for the lruvec, but not for memcg_page_state(). * * Without memcg, use the zswap pool-wide metrics. */ if (!mem_cgroup_disabled()) { mem_cgroup_flush_stats(memcg); nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT; nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED); } else { nr_backing = zswap_total_pages(); nr_stored = atomic_long_read(&zswap_stored_pages); } if (!nr_stored) return 0; nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc); if (!nr_freeable) return 0; /* * Subtract from the lru size the number of pages that are recently swapped * in from disk. 
The idea is that had we protect the zswap's LRU by this * amount of pages, these disk swapins would not have happened. */ nr_disk_swapins_cur = atomic_long_read(nr_disk_swapins); do { if (nr_freeable >= nr_disk_swapins_cur) nr_remain = 0; else nr_remain = nr_disk_swapins_cur - nr_freeable; } while (!atomic_long_try_cmpxchg( nr_disk_swapins, &nr_disk_swapins_cur, nr_remain)); nr_freeable -= nr_disk_swapins_cur - nr_remain; if (!nr_freeable) return 0; /* * Scale the number of freeable pages by the memory saving factor. * This ensures that the better zswap compresses memory, the fewer * pages we will evict to swap (as it will otherwise incur IO for * relatively small memory saving). */ return mult_frac(nr_freeable, nr_backing, nr_stored); } static struct shrinker *zswap_alloc_shrinker(void) { struct shrinker *shrinker; shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap"); if (!shrinker) return NULL; shrinker->scan_objects = zswap_shrinker_scan; shrinker->count_objects = zswap_shrinker_count; shrinker->batch = 0; shrinker->seeks = DEFAULT_SEEKS; return shrinker; } static int shrink_memcg(struct mem_cgroup *memcg) { int nid, shrunk = 0, scanned = 0; if (!mem_cgroup_zswap_writeback_enabled(memcg)) return -ENOENT; /* * Skip zombies because their LRUs are reparented and we would be * reclaiming from the parent instead of the dead memcg. */ if (memcg && !mem_cgroup_online(memcg)) return -ENOENT; for_each_node_state(nid, N_NORMAL_MEMORY) { unsigned long nr_to_walk = 1; shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg, &shrink_memcg_cb, NULL, &nr_to_walk); scanned += 1 - nr_to_walk; } if (!scanned) return -ENOENT; return shrunk ? 0 : -EAGAIN; } static void shrink_worker(struct work_struct *w) { struct mem_cgroup *memcg; int ret, failures = 0, attempts = 0; unsigned long thr; /* Reclaim down to the accept threshold */ thr = zswap_accept_thr_pages(); /* * Global reclaim will select cgroup in a round-robin fashion from all * online memcgs, but memcgs that have no pages in zswap and * writeback-disabled memcgs (memory.zswap.writeback=0) are not * candidates for shrinking. * * Shrinking will be aborted if we encounter the following * MAX_RECLAIM_RETRIES times: * - No writeback-candidate memcgs found in a memcg tree walk. * - Shrinking a writeback-candidate memcg failed. * * We save iteration cursor memcg into zswap_next_shrink, * which can be modified by the offline memcg cleaner * zswap_memcg_offline_cleanup(). * * Since the offline cleaner is called only once, we cannot leave an * offline memcg reference in zswap_next_shrink. * We can rely on the cleaner only if we get online memcg under lock. * * If we get an offline memcg, we cannot determine if the cleaner has * already been called or will be called later. We must put back the * reference before returning from this function. Otherwise, the * offline memcg left in zswap_next_shrink will hold the reference * until the next run of shrink_worker(). */ do { /* * Start shrinking from the next memcg after zswap_next_shrink. * When the offline cleaner has already advanced the cursor, * advancing the cursor here overlooks one memcg, but this * should be negligibly rare. * * If we get an online memcg, keep the extra reference in case * the original one obtained by mem_cgroup_iter() is dropped by * zswap_memcg_offline_cleanup() while we are shrinking the * memcg. 
*/ spin_lock(&zswap_shrink_lock); do { memcg = mem_cgroup_iter(NULL, zswap_next_shrink, NULL); zswap_next_shrink = memcg; } while (memcg && !mem_cgroup_tryget_online(memcg)); spin_unlock(&zswap_shrink_lock); if (!memcg) { /* * Continue shrinking without incrementing failures if * we found candidate memcgs in the last tree walk. */ if (!attempts && ++failures == MAX_RECLAIM_RETRIES) break; attempts = 0; goto resched; } ret = shrink_memcg(memcg); /* drop the extra reference */ mem_cgroup_put(memcg); /* * There are no writeback-candidate pages in the memcg. * This is not an issue as long as we can find another memcg * with pages in zswap. Skip this without incrementing attempts * and failures. */ if (ret == -ENOENT) continue; ++attempts; if (ret && ++failures == MAX_RECLAIM_RETRIES) break; resched: cond_resched(); } while (zswap_total_pages() > thr); } /********************************* * main API **********************************/ static bool zswap_store_page(struct page *page, struct obj_cgroup *objcg, struct zswap_pool *pool) { swp_entry_t page_swpentry = page_swap_entry(page); struct zswap_entry *entry, *old; /* allocate entry */ entry = zswap_entry_cache_alloc(GFP_KERNEL, page_to_nid(page)); if (!entry) { zswap_reject_kmemcache_fail++; return false; } if (!zswap_compress(page, entry, pool)) goto compress_failed; old = xa_store(swap_zswap_tree(page_swpentry), swp_offset(page_swpentry), entry, GFP_KERNEL); if (xa_is_err(old)) { int err = xa_err(old); WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err); zswap_reject_alloc_fail++; goto store_failed; } /* * We may have had an existing entry that became stale when * the folio was redirtied and now the new version is being * swapped out. Get rid of the old. */ if (old) zswap_entry_free(old); /* * The entry is successfully compressed and stored in the tree, there is * no further possibility of failure. Grab refs to the pool and objcg, * charge zswap memory, and increment zswap_stored_pages. * The opposite actions will be performed by zswap_entry_free() * when the entry is removed from the tree. */ zswap_pool_get(pool); if (objcg) { obj_cgroup_get(objcg); obj_cgroup_charge_zswap(objcg, entry->length); } atomic_long_inc(&zswap_stored_pages); if (entry->length == PAGE_SIZE) atomic_long_inc(&zswap_stored_incompressible_pages); /* * We finish initializing the entry while it's already in xarray. * This is safe because: * * 1. Concurrent stores and invalidations are excluded by folio lock. * * 2. Writeback is excluded by the entry not being on the LRU yet. * The publishing order matters to prevent writeback from seeing * an incoherent entry. 
*/ entry->pool = pool; entry->swpentry = page_swpentry; entry->objcg = objcg; entry->referenced = true; if (entry->length) { INIT_LIST_HEAD(&entry->lru); zswap_lru_add(&zswap_list_lru, entry); } return true; store_failed: zs_free(pool->zs_pool, entry->handle); compress_failed: zswap_entry_cache_free(entry); return false; } bool zswap_store(struct folio *folio) { long nr_pages = folio_nr_pages(folio); swp_entry_t swp = folio->swap; struct obj_cgroup *objcg = NULL; struct mem_cgroup *memcg = NULL; struct zswap_pool *pool; bool ret = false; long index; VM_WARN_ON_ONCE(!folio_test_locked(folio)); VM_WARN_ON_ONCE(!folio_test_swapcache(folio)); if (!zswap_enabled) goto check_old; objcg = get_obj_cgroup_from_folio(folio); if (objcg && !obj_cgroup_may_zswap(objcg)) { memcg = get_mem_cgroup_from_objcg(objcg); if (shrink_memcg(memcg)) { mem_cgroup_put(memcg); goto put_objcg; } mem_cgroup_put(memcg); } if (zswap_check_limits()) goto put_objcg; pool = zswap_pool_current_get(); if (!pool) goto put_objcg; if (objcg) { memcg = get_mem_cgroup_from_objcg(objcg); if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) { mem_cgroup_put(memcg); goto put_pool; } mem_cgroup_put(memcg); } for (index = 0; index < nr_pages; ++index) { struct page *page = folio_page(folio, index); if (!zswap_store_page(page, objcg, pool)) goto put_pool; } if (objcg) count_objcg_events(objcg, ZSWPOUT, nr_pages); count_vm_events(ZSWPOUT, nr_pages); ret = true; put_pool: zswap_pool_put(pool); put_objcg: obj_cgroup_put(objcg); if (!ret && zswap_pool_reached_full) queue_work(shrink_wq, &zswap_shrink_work); check_old: /* * If the zswap store fails or zswap is disabled, we must invalidate * the possibly stale entries which were previously stored at the * offsets corresponding to each page of the folio. Otherwise, * writeback could overwrite the new data in the swapfile. */ if (!ret) { unsigned type = swp_type(swp); pgoff_t offset = swp_offset(swp); struct zswap_entry *entry; struct xarray *tree; for (index = 0; index < nr_pages; ++index) { tree = swap_zswap_tree(swp_entry(type, offset + index)); entry = xa_erase(tree, offset + index); if (entry) zswap_entry_free(entry); } } return ret; } /** * zswap_load() - load a folio from zswap * @folio: folio to load * * Return: 0 on success, with the folio unlocked and marked up-to-date, or one * of the following error codes: * * -EIO: if the swapped out content was in zswap, but could not be loaded * into the page due to a decompression failure. The folio is unlocked, but * NOT marked up-to-date, so that an IO error is emitted (e.g. do_swap_page() * will SIGBUS). * * -EINVAL: if the swapped out content was in zswap, but the page belongs * to a large folio, which is not supported by zswap. The folio is unlocked, * but NOT marked up-to-date, so that an IO error is emitted (e.g. * do_swap_page() will SIGBUS). * * -ENOENT: if the swapped out content was not in zswap. The folio remains * locked on return. */ int zswap_load(struct folio *folio) { swp_entry_t swp = folio->swap; pgoff_t offset = swp_offset(swp); bool swapcache = folio_test_swapcache(folio); struct xarray *tree = swap_zswap_tree(swp); struct zswap_entry *entry; VM_WARN_ON_ONCE(!folio_test_locked(folio)); if (zswap_never_enabled()) return -ENOENT; /* * Large folios should not be swapped in while zswap is being used, as * they are not properly handled. Zswap does not properly load large * folios, and a large folio may only be partially in zswap. 
*/ if (WARN_ON_ONCE(folio_test_large(folio))) { folio_unlock(folio); return -EINVAL; } entry = xa_load(tree, offset); if (!entry) return -ENOENT; if (!zswap_decompress(entry, folio)) { folio_unlock(folio); return -EIO; } folio_mark_uptodate(folio); count_vm_event(ZSWPIN); if (entry->objcg) count_objcg_events(entry->objcg, ZSWPIN, 1); /* * When reading into the swapcache, invalidate our entry. The * swapcache can be the authoritative owner of the page and * its mappings, and the pressure that results from having two * in-memory copies outweighs any benefits of caching the * compression work. * * (Most swapins go through the swapcache. The notable * exception is the singleton fault on SWP_SYNCHRONOUS_IO * files, which reads into a private page and may free it if * the fault fails. We remain the primary owner of the entry.) */ if (swapcache) { folio_mark_dirty(folio); xa_erase(tree, offset); zswap_entry_free(entry); } folio_unlock(folio); return 0; } void zswap_invalidate(swp_entry_t swp) { pgoff_t offset = swp_offset(swp); struct xarray *tree = swap_zswap_tree(swp); struct zswap_entry *entry; if (xa_empty(tree)) return; entry = xa_erase(tree, offset); if (entry) zswap_entry_free(entry); } int zswap_swapon(int type, unsigned long nr_pages) { struct xarray *trees, *tree; unsigned int nr, i; nr = DIV_ROUND_UP(nr_pages, ZSWAP_ADDRESS_SPACE_PAGES); trees = kvzalloc_objs(*tree, nr); if (!trees) { pr_err("alloc failed, zswap disabled for swap type %d\n", type); return -ENOMEM; } for (i = 0; i < nr; i++) xa_init(trees + i); nr_zswap_trees[type] = nr; zswap_trees[type] = trees; return 0; } void zswap_swapoff(int type) { struct xarray *trees = zswap_trees[type]; unsigned int i; if (!trees) return; /* try_to_unuse() invalidated all the entries already */ for (i = 0; i < nr_zswap_trees[type]; i++) WARN_ON_ONCE(!xa_empty(trees + i)); kvfree(trees); nr_zswap_trees[type] = 0; zswap_trees[type] = NULL; } /********************************* * debugfs functions **********************************/ #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> static struct dentry *zswap_debugfs_root; static int debugfs_get_total_size(void *data, u64 *val) { *val = zswap_total_pages() * PAGE_SIZE; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n"); static int debugfs_get_stored_pages(void *data, u64 *val) { *val = atomic_long_read(&zswap_stored_pages); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(stored_pages_fops, debugfs_get_stored_pages, NULL, "%llu\n"); static int debugfs_get_stored_incompressible_pages(void *data, u64 *val) { *val = atomic_long_read(&zswap_stored_incompressible_pages); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(stored_incompressible_pages_fops, debugfs_get_stored_incompressible_pages, NULL, "%llu\n"); static int zswap_debugfs_init(void) { if (!debugfs_initialized()) return -ENODEV; zswap_debugfs_root = debugfs_create_dir("zswap", NULL); debugfs_create_u64("pool_limit_hit", 0444, zswap_debugfs_root, &zswap_pool_limit_hit); debugfs_create_u64("reject_reclaim_fail", 0444, zswap_debugfs_root, &zswap_reject_reclaim_fail); debugfs_create_u64("reject_alloc_fail", 0444, zswap_debugfs_root, &zswap_reject_alloc_fail); debugfs_create_u64("reject_kmemcache_fail", 0444, zswap_debugfs_root, &zswap_reject_kmemcache_fail); debugfs_create_u64("reject_compress_fail", 0444, zswap_debugfs_root, &zswap_reject_compress_fail); debugfs_create_u64("reject_compress_poor", 0444, zswap_debugfs_root, &zswap_reject_compress_poor); debugfs_create_u64("decompress_fail", 0444, zswap_debugfs_root, 
&zswap_decompress_fail); debugfs_create_u64("written_back_pages", 0444, zswap_debugfs_root, &zswap_written_back_pages); debugfs_create_file("pool_total_size", 0444, zswap_debugfs_root, NULL, &total_size_fops); debugfs_create_file("stored_pages", 0444, zswap_debugfs_root, NULL, &stored_pages_fops); debugfs_create_file("stored_incompressible_pages", 0444, zswap_debugfs_root, NULL, &stored_incompressible_pages_fops); return 0; } #else static int zswap_debugfs_init(void) { return 0; } #endif /********************************* * module init and exit **********************************/ static int zswap_setup(void) { struct zswap_pool *pool; int ret; zswap_entry_cache = KMEM_CACHE(zswap_entry, 0); if (!zswap_entry_cache) { pr_err("entry cache creation failed\n"); goto cache_fail; } ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE, "mm/zswap_pool:prepare", zswap_cpu_comp_prepare, zswap_cpu_comp_dead); if (ret) goto hp_fail; shrink_wq = alloc_workqueue("zswap-shrink", WQ_UNBOUND|WQ_MEM_RECLAIM, 1); if (!shrink_wq) goto shrink_wq_fail; zswap_shrinker = zswap_alloc_shrinker(); if (!zswap_shrinker) goto shrinker_fail; if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker)) goto lru_fail; shrinker_register(zswap_shrinker); INIT_WORK(&zswap_shrink_work, shrink_worker); pool = __zswap_pool_create_fallback(); if (pool) { pr_info("loaded using pool %s\n", pool->tfm_name); list_add(&pool->list, &zswap_pools); zswap_has_pool = true; static_branch_enable(&zswap_ever_enabled); } else { pr_err("pool creation failed\n"); zswap_enabled = false; } if (zswap_debugfs_init()) pr_warn("debugfs initialization failed\n"); zswap_init_state = ZSWAP_INIT_SUCCEED; return 0; lru_fail: shrinker_free(zswap_shrinker); shrinker_fail: destroy_workqueue(shrink_wq); shrink_wq_fail: cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE); hp_fail: kmem_cache_destroy(zswap_entry_cache); cache_fail: /* if built-in, we aren't unloaded on failure; don't allow use */ zswap_init_state = ZSWAP_INIT_FAILED; zswap_enabled = false; return -ENOMEM; } static int __init zswap_init(void) { if (!zswap_enabled) return 0; return zswap_setup(); } /* must be late so crypto has time to come up */ late_initcall(zswap_init); MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>"); MODULE_DESCRIPTION("Compressed cache for swap pages");
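The zswap_shrinker_count() path above first protects the LRU by the number of recent disk swapins and then scales the remaining freeable count by the pool's compression ratio via mult_frac(). The following standalone sketch is an illustration only, not kernel code: the sample numbers and the userspace mult_frac stand-in are invented, and the atomic cmpxchg bookkeeping of nr_disk_swapins is deliberately omitted.

/*
 * Standalone illustration of the shrinker-count arithmetic in
 * zswap_shrinker_count(): subtract recently swapped-in pages, then scale
 * the remainder by the pool's memory-saving factor. Numbers are made up.
 */
#include <stdio.h>

/* Userspace stand-in for the kernel's mult_frac(x, numer, denom). */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
			       unsigned long denom)
{
	unsigned long quot = x / denom;
	unsigned long rem  = x % denom;

	return quot * numer + (rem * numer) / denom;
}

int main(void)
{
	unsigned long nr_freeable = 1000;	/* entries on the zswap LRU */
	unsigned long nr_disk_swapins = 300;	/* recent swapins from disk */
	unsigned long nr_backing = 250;		/* backing pool pages used */
	unsigned long nr_stored = 1000;		/* pages stored in zswap */

	/* Protect the LRU by the number of recent disk swapins. */
	nr_freeable = nr_freeable > nr_disk_swapins ?
		      nr_freeable - nr_disk_swapins : 0;

	/* Better compression ratio -> fewer objects offered for writeback. */
	printf("objects reported to the shrinker: %lu\n",
	       mult_frac(nr_freeable, nr_backing, nr_stored));
	return 0;
}

With these made-up numbers, the 700 entries left after protection are scaled down to 175, reflecting a 4:1 compression ratio.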
// SPDX-License-Identifier: GPL-2.0-only /* * net/sched/sch_sfb.c Stochastic Fair Blue * * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr> * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com> * * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue: * A New Class of Active Queue Management Algorithms. * U. Michigan CSE-TR-387-99, April 1999.
* * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/random.h> #include <linux/siphash.h> #include <net/ip.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/inet_ecn.h> /* * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level) * This implementation uses L = 8 and N = 16 * This permits us to split one 32bit hash (provided per packet by rxhash or * external classifier) into 8 subhashes of 4 bits. */ #define SFB_BUCKET_SHIFT 4 #define SFB_NUMBUCKETS (1 << SFB_BUCKET_SHIFT) /* N bins per Level */ #define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1) #define SFB_LEVELS (32 / SFB_BUCKET_SHIFT) /* L */ /* SFB algo uses a virtual queue, named "bin" */ struct sfb_bucket { u16 qlen; /* length of virtual queue */ u16 p_mark; /* marking probability */ }; /* We use a double buffering right before hash change * (Section 4.4 of SFB reference : moving hash functions) */ struct sfb_bins { siphash_key_t perturbation; /* siphash key */ struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS]; }; struct sfb_sched_data { struct Qdisc *qdisc; struct tcf_proto __rcu *filter_list; struct tcf_block *block; unsigned long rehash_interval; unsigned long warmup_time; /* double buffering warmup time in jiffies */ u32 max; u32 bin_size; /* maximum queue length per bin */ u32 increment; /* d1 */ u32 decrement; /* d2 */ u32 limit; /* HARD maximal queue length */ u32 penalty_rate; u32 penalty_burst; u32 tokens_avail; unsigned long rehash_time; unsigned long token_time; u8 slot; /* current active bins (0 or 1) */ bool double_buffering; struct sfb_bins bins[2]; struct { u32 earlydrop; u32 penaltydrop; u32 bucketdrop; u32 queuedrop; u32 childdrop; /* drops in child qdisc */ u32 marked; /* ECN mark */ } stats; }; /* * Each queued skb might be hashed on one or two bins * We store in skb_cb the two hash values. * (A zero value means double buffering was not used) */ struct sfb_skb_cb { u32 hashes[2]; }; static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb) { qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb)); return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data; } /* * If using 'internal' SFB flow classifier, hash comes from skb rxhash * If using external classifier, hash comes from the classid. */ static u32 sfb_hash(const struct sk_buff *skb, u32 slot) { return sfb_skb_cb(skb)->hashes[slot]; } /* Probabilities are coded as Q0.16 fixed-point values, * with 0xFFFF representing 65535/65536 (almost 1.0) * Addition and subtraction are saturating in [0, 65535] */ static u32 prob_plus(u32 p1, u32 p2) { u32 res = p1 + p2; return min_t(u32, res, SFB_MAX_PROB); } static u32 prob_minus(u32 p1, u32 p2) { return p1 > p2 ? 
p1 - p2 : 0; } static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) { int i; struct sfb_bucket *b = &q->bins[slot].bins[0][0]; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; sfbhash >>= SFB_BUCKET_SHIFT; if (b[hash].qlen < 0xFFFF) b[hash].qlen++; b += SFB_NUMBUCKETS; /* next level */ } } static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q) { u32 sfbhash; sfbhash = cb->hashes[0]; if (sfbhash) increment_one_qlen(sfbhash, 0, q); sfbhash = cb->hashes[1]; if (sfbhash) increment_one_qlen(sfbhash, 1, q); } static void decrement_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) { int i; struct sfb_bucket *b = &q->bins[slot].bins[0][0]; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; sfbhash >>= SFB_BUCKET_SHIFT; if (b[hash].qlen > 0) b[hash].qlen--; b += SFB_NUMBUCKETS; /* next level */ } } static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) { u32 sfbhash; sfbhash = sfb_hash(skb, 0); if (sfbhash) decrement_one_qlen(sfbhash, 0, q); sfbhash = sfb_hash(skb, 1); if (sfbhash) decrement_one_qlen(sfbhash, 1, q); } static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q) { b->p_mark = prob_minus(b->p_mark, q->decrement); } static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q) { b->p_mark = prob_plus(b->p_mark, q->increment); } static void sfb_zero_all_buckets(struct sfb_sched_data *q) { memset(&q->bins, 0, sizeof(q->bins)); } /* * compute max qlen, max p_mark, and avg p_mark */ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q) { int i; u32 qlen = 0, prob = 0, totalpm = 0; const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0]; for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) { if (qlen < b->qlen) qlen = b->qlen; totalpm += b->p_mark; if (prob < b->p_mark) prob = b->p_mark; b++; } *prob_r = prob; *avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS); return qlen; } static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q) { get_random_bytes(&q->bins[slot].perturbation, sizeof(q->bins[slot].perturbation)); } static void sfb_swap_slot(struct sfb_sched_data *q) { sfb_init_perturbation(q->slot, q); q->slot ^= 1; q->double_buffering = false; } /* Non elastic flows are allowed to use part of the bandwidth, expressed * in "penalty_rate" packets per second, with "penalty_burst" burst */ static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q) { if (q->penalty_rate == 0 || q->penalty_burst == 0) return true; if (q->tokens_avail < 1) { unsigned long age = min(10UL * HZ, jiffies - q->token_time); q->tokens_avail = (age * q->penalty_rate) / HZ; if (q->tokens_avail > q->penalty_burst) q->tokens_avail = q->penalty_burst; q->token_time = jiffies; if (q->tokens_avail < 1) return true; } q->tokens_avail--; return false; } static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, int *qerr, u32 *salt) { struct tcf_result res; int result; result = tcf_classify(skb, NULL, fl, &res, false); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; fallthrough; case TC_ACT_SHOT: return false; } #endif *salt = TC_H_MIN(res.classid); return true; } return false; } static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT; struct sfb_sched_data *q = qdisc_priv(sch); unsigned 
int len = qdisc_pkt_len(skb); struct Qdisc *child = q->qdisc; struct tcf_proto *fl; struct sfb_skb_cb cb; int i; u32 p_min = ~0; u32 minqlen = ~0; u32 r, sfbhash; u32 slot = q->slot; int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (unlikely(sch->q.qlen >= q->limit)) { qdisc_qstats_overlimit(sch); q->stats.queuedrop++; goto drop; } if (q->rehash_interval > 0) { unsigned long limit = q->rehash_time + q->rehash_interval; if (unlikely(time_after(jiffies, limit))) { sfb_swap_slot(q); q->rehash_time = jiffies; } else if (unlikely(!q->double_buffering && q->warmup_time > 0 && time_after(jiffies, limit - q->warmup_time))) { q->double_buffering = true; } } fl = rcu_dereference_bh(q->filter_list); if (fl) { u32 salt; /* If using external classifiers, get result and record it. */ if (!sfb_classify(skb, fl, &ret, &salt)) goto other_drop; sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation); } else { sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation); } if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; sfbhash >>= SFB_BUCKET_SHIFT; if (b->qlen == 0) decrement_prob(b, q); else if (b->qlen >= q->bin_size) increment_prob(b, q); if (minqlen > b->qlen) minqlen = b->qlen; if (p_min > b->p_mark) p_min = b->p_mark; } slot ^= 1; sfb_skb_cb(skb)->hashes[slot] = 0; if (unlikely(minqlen >= q->max)) { qdisc_qstats_overlimit(sch); q->stats.bucketdrop++; goto drop; } if (unlikely(p_min >= SFB_MAX_PROB)) { /* Inelastic flow */ if (q->double_buffering) { sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; sfbhash >>= SFB_BUCKET_SHIFT; if (b->qlen == 0) decrement_prob(b, q); else if (b->qlen >= q->bin_size) increment_prob(b, q); } } if (sfb_rate_limit(skb, q)) { qdisc_qstats_overlimit(sch); q->stats.penaltydrop++; goto drop; } goto enqueue; } r = get_random_u16() & SFB_MAX_PROB; reason = SKB_DROP_REASON_QDISC_CONGESTED; if (unlikely(r < p_min)) { if (unlikely(p_min > SFB_MAX_PROB / 2)) { /* If we're marking that many packets, then either * this flow is unresponsive, or we're badly congested. * In either case, we want to start dropping packets. 
*/ if (r < (p_min - SFB_MAX_PROB / 2) * 2) { q->stats.earlydrop++; goto drop; } } if (INET_ECN_set_ce(skb)) { q->stats.marked++; } else { q->stats.earlydrop++; goto drop; } } enqueue: memcpy(&cb, sfb_skb_cb(skb), sizeof(cb)); ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { sch->qstats.backlog += len; sch->q.qlen++; increment_qlen(&cb, q); } else if (net_xmit_drop_count(ret)) { q->stats.childdrop++; qdisc_qstats_drop(sch); } return ret; drop: qdisc_drop_reason(skb, sch, to_free, reason); return NET_XMIT_CN; other_drop: if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } static struct sk_buff *sfb_dequeue(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; struct sk_buff *skb; skb = child->dequeue(q->qdisc); if (skb) { qdisc_bstats_update(sch, skb); qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; decrement_qlen(skb, q); } return skb; } static struct sk_buff *sfb_peek(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; return child->ops->peek(child); } /* No sfb_drop -- impossible since the child doesn't return the dropped skb. */ static void sfb_reset(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); if (likely(q->qdisc)) qdisc_reset(q->qdisc); q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q); sfb_init_perturbation(0, q); } static void sfb_destroy(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); tcf_block_put(q->block); qdisc_put(q->qdisc); } static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = { [TCA_SFB_PARMS] = { .len = sizeof(struct tc_sfb_qopt) }, }; static const struct tc_sfb_qopt sfb_default_ops = { .rehash_interval = 600 * MSEC_PER_SEC, .warmup_time = 60 * MSEC_PER_SEC, .limit = 0, .max = 25, .bin_size = 20, .increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */ .decrement = (SFB_MAX_PROB + 3000) / 6000, .penalty_rate = 10, .penalty_burst = 20, }; static int sfb_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child, *old; struct nlattr *tb[TCA_SFB_MAX + 1]; const struct tc_sfb_qopt *ctl = &sfb_default_ops; u32 limit; int err; if (opt) { err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt, sfb_policy, NULL); if (err < 0) return -EINVAL; if (tb[TCA_SFB_PARMS] == NULL) return -EINVAL; ctl = nla_data(tb[TCA_SFB_PARMS]); } limit = ctl->limit; if (limit == 0) limit = qdisc_dev(sch)->tx_queue_len; child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack); if (IS_ERR(child)) return PTR_ERR(child); if (child != &noop_qdisc) qdisc_hash_add(child, true); sch_tree_lock(sch); qdisc_purge_queue(q->qdisc); old = q->qdisc; q->qdisc = child; q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval); q->warmup_time = msecs_to_jiffies(ctl->warmup_time); q->rehash_time = jiffies; q->limit = limit; q->increment = ctl->increment; q->decrement = ctl->decrement; q->max = ctl->max; q->bin_size = ctl->bin_size; q->penalty_rate = ctl->penalty_rate; q->penalty_burst = ctl->penalty_burst; q->tokens_avail = ctl->penalty_burst; q->token_time = jiffies; q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q); sfb_init_perturbation(0, q); sfb_init_perturbation(1, q); sch_tree_unlock(sch); qdisc_put(old); return 0; } static int sfb_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct sfb_sched_data *q = qdisc_priv(sch); int err; err = tcf_block_get(&q->block, 
&q->filter_list, sch, extack); if (err) return err; q->qdisc = &noop_qdisc; return sfb_change(sch, opt, extack); } static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb) { struct sfb_sched_data *q = qdisc_priv(sch); struct nlattr *opts; struct tc_sfb_qopt opt = { .rehash_interval = jiffies_to_msecs(q->rehash_interval), .warmup_time = jiffies_to_msecs(q->warmup_time), .limit = q->limit, .max = q->max, .bin_size = q->bin_size, .increment = q->increment, .decrement = q->decrement, .penalty_rate = q->penalty_rate, .penalty_burst = q->penalty_burst, }; sch->qstats.backlog = q->qdisc->qstats.backlog; opts = nla_nest_start_noflag(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt)) goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct sfb_sched_data *q = qdisc_priv(sch); struct tc_sfb_xstats st = { .earlydrop = q->stats.earlydrop, .penaltydrop = q->stats.penaltydrop, .bucketdrop = q->stats.bucketdrop, .queuedrop = q->stats.queuedrop, .childdrop = q->stats.childdrop, .marked = q->stats.marked, }; st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q); return gnet_stats_copy_app(d, &st, sizeof(st)); } static int sfb_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { return -ENOSYS; } static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old, struct netlink_ext_ack *extack) { struct sfb_sched_data *q = qdisc_priv(sch); if (new == NULL) new = &noop_qdisc; *old = qdisc_replace(sch, new, &q->qdisc); return 0; } static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg) { struct sfb_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long sfb_find(struct Qdisc *sch, u32 classid) { return 1; } static void sfb_unbind(struct Qdisc *sch, unsigned long arg) { } static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg, struct netlink_ext_ack *extack) { return -ENOSYS; } static int sfb_delete(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { return -ENOSYS; } static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { tc_qdisc_stats_dump(sch, 1, walker); } } static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { struct sfb_sched_data *q = qdisc_priv(sch); if (cl) return NULL; return q->block; } static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { return 0; } static const struct Qdisc_class_ops sfb_class_ops = { .graft = sfb_graft, .leaf = sfb_leaf, .find = sfb_find, .change = sfb_change_class, .delete = sfb_delete, .walk = sfb_walk, .tcf_block = sfb_tcf_block, .bind_tcf = sfb_bind, .unbind_tcf = sfb_unbind, .dump = sfb_dump_class, }; static struct Qdisc_ops sfb_qdisc_ops __read_mostly = { .id = "sfb", .priv_size = sizeof(struct sfb_sched_data), .cl_ops = &sfb_class_ops, .enqueue = sfb_enqueue, .dequeue = sfb_dequeue, .peek = sfb_peek, .init = sfb_init, .reset = sfb_reset, .destroy = sfb_destroy, .change = sfb_change, .dump = sfb_dump, .dump_stats = sfb_dump_stats, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("sfb"); static int __init sfb_module_init(void) { return register_qdisc(&sfb_qdisc_ops); } static void __exit sfb_module_exit(void) { unregister_qdisc(&sfb_qdisc_ops); } module_init(sfb_module_init) 
module_exit(sfb_module_exit) MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline"); MODULE_AUTHOR("Juliusz Chroboczek"); MODULE_AUTHOR("Eric Dumazet"); MODULE_LICENSE("GPL");
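Two of the SFB building blocks above are easy to see in isolation: splitting one 32-bit per-packet hash into SFB_LEVELS bucket indices of SFB_BUCKET_SHIFT bits each, and the saturating Q0.16 marking-probability updates. The sketch below is a standalone userspace illustration, not the qdisc code; the hash value is arbitrary and SFB_MAX_PROB is assumed to be 0xFFFF, as the comment above implies.

/*
 * Standalone illustration of SFB's hash splitting and saturating
 * probability arithmetic. The example hash is arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS   (1 << SFB_BUCKET_SHIFT)
#define SFB_BUCKET_MASK  (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS       (32 / SFB_BUCKET_SHIFT)
#define SFB_MAX_PROB     0xFFFF	/* assumed ceiling, ~65535/65536 */

static uint32_t prob_plus(uint32_t p1, uint32_t p2)
{
	uint32_t res = p1 + p2;

	return res < SFB_MAX_PROB ? res : SFB_MAX_PROB;	/* saturate high */
}

static uint32_t prob_minus(uint32_t p1, uint32_t p2)
{
	return p1 > p2 ? p1 - p2 : 0;			/* saturate low */
}

int main(void)
{
	uint32_t sfbhash = 0x9e3779b9;	/* arbitrary per-packet hash */
	int i;

	/* One 4-bit bucket index per level; eight levels cover 32 bits. */
	for (i = 0; i < SFB_LEVELS; i++) {
		printf("level %d -> bucket %u\n", i, sfbhash & SFB_BUCKET_MASK);
		sfbhash >>= SFB_BUCKET_SHIFT;
	}

	/* d1/d2 style probability updates, saturating in [0, 0xFFFF]. */
	printf("increment near ceiling: %#x\n", prob_plus(0xFFF0, 0x0042));
	printf("decrement near floor:   %#x\n", prob_minus(0x0010, 0x0042));
	return 0;
}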
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Media device * * Copyright (C) 2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> */ #ifndef _MEDIA_DEVICE_H #define _MEDIA_DEVICE_H #include <linux/atomic.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <media/media-devnode.h> #include <media/media-entity.h> struct ida; struct media_device; /** * struct media_entity_notify - Media Entity Notify * * @list: List head * @notify_data: Input data to invoke the callback * @notify: Callback function pointer * * Drivers may register a callback to take action when new entities get * registered with the media device. This handler is intended for creating * links between existing entities and should not create entities and register * them. */ struct media_entity_notify { struct list_head list; void *notify_data; void (*notify)(struct media_entity *entity, void *notify_data); }; /** * struct media_device_ops - Media device operations * @link_notify: Link state change notification callback. This callback is * called with the graph_mutex held. * @req_alloc: Allocate a request. Set this if you need to allocate a struct * larger than struct media_request. @req_alloc and @req_free must * either both be set or both be NULL. * @req_free: Free a request. Set this if @req_alloc was set as well, leave * to NULL otherwise. * @req_validate: Validate a request, but do not queue yet.
The req_queue_mutex * lock is held when this op is called. * @req_queue: Queue a validated request, cannot fail. If something goes * wrong when queueing this request then it should be marked * as such internally in the driver and any related buffers * must eventually return to vb2 with state VB2_BUF_STATE_ERROR. * The req_queue_mutex lock is held when this op is called. * It is important that vb2 buffer objects are queued last after * all other object types are queued: queueing a buffer kickstarts * the request processing, so all other objects related to the * request (and thus the buffer) must be available to the driver. * And once a buffer is queued, then the driver can complete * or delete objects from the request before req_queue exits. */ struct media_device_ops { int (*link_notify)(struct media_link *link, u32 flags, unsigned int notification); struct media_request *(*req_alloc)(struct media_device *mdev); void (*req_free)(struct media_request *req); int (*req_validate)(struct media_request *req); void (*req_queue)(struct media_request *req); }; /** * struct media_device - Media device * @dev: Parent device * @devnode: Media device node * @driver_name: Optional device driver name. If not set, calls to * %MEDIA_IOC_DEVICE_INFO will return ``dev->driver->name``. * This is needed for USB drivers for example, as otherwise * they'll all appear as if the driver name was "usb". * @model: Device model name * @serial: Device serial number (optional) * @bus_info: Unique and stable device location identifier * @hw_revision: Hardware device revision * @topology_version: Monotonic counter for storing the version of the graph * topology. Should be incremented each time the topology changes. * @id: Unique ID used on the last registered graph object * @entity_internal_idx: Unique internal entity ID used by the graph traversal * algorithms * @entity_internal_idx_max: Allocated internal entity indices * @entities: List of registered entities * @interfaces: List of registered interfaces * @pads: List of registered pads * @links: List of registered links * @entity_notify: List of registered entity_notify callbacks * @graph_mutex: Protects access to struct media_device data * @pm_count_walk: Graph walk for power state walk. Access serialised using * graph_mutex. * * @source_priv: Driver Private data for enable/disable source handlers * @enable_source: Enable Source Handler function pointer * @disable_source: Disable Source Handler function pointer * * @ops: Operation handler callbacks * @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t. * other operations that stop or start streaming. * @num_requests: number of associated requests * @num_request_objects: number of associated request objects * @media_dir: DebugFS media directory * @request_id: Used to generate unique request IDs * * This structure represents an abstract high-level media device. It allows easy * access to entities and provides basic media device-level support. The * structure can be allocated directly or embedded in a larger structure. * * The parent @dev is a physical device. It must be set before registering the * media device. * * @model is a descriptive model name exported through sysfs. It doesn't have to * be unique. * * @enable_source is a handler to find source entity for the * sink entity and activate the link between them if source * entity is free. Drivers should call this handler before * accessing the source. 
* * @disable_source is a handler to find source entity for the * sink entity and deactivate the link between them. Drivers * should call this handler to release the source. * * Use-case: find tuner entity connected to the decoder * entity and check if it is available, and activate the * link between them from @enable_source and deactivate * from @disable_source. * * .. note:: * * Bridge driver is expected to implement and set the * handler when &media_device is registered or when * bridge driver finds the media_device during probe. * Bridge driver sets source_priv with information * necessary to run @enable_source and @disable_source handlers. * Callers should hold graph_mutex to access and call @enable_source * and @disable_source handlers. */ struct media_device { /* dev->driver_data points to this struct. */ struct device *dev; struct media_devnode *devnode; char model[32]; char driver_name[32]; char serial[40]; char bus_info[32]; u32 hw_revision; u64 topology_version; u32 id; struct ida entity_internal_idx; int entity_internal_idx_max; struct list_head entities; struct list_head interfaces; struct list_head pads; struct list_head links; /* notify callback list invoked when a new entity is registered */ struct list_head entity_notify; /* Serializes graph operations. */ struct mutex graph_mutex; struct media_graph pm_count_walk; void *source_priv; int (*enable_source)(struct media_entity *entity, struct media_pipeline *pipe); void (*disable_source)(struct media_entity *entity); const struct media_device_ops *ops; struct mutex req_queue_mutex; atomic_t num_requests; atomic_t num_request_objects; /* debugfs */ struct dentry *media_dir; atomic_t request_id; }; /* We don't need to include usb.h here */ struct usb_device; #ifdef CONFIG_MEDIA_CONTROLLER /* Supported link_notify @notification values. */ #define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0 #define MEDIA_DEV_NOTIFY_POST_LINK_CH 1 /** * media_device_init() - Initializes a media device element * * @mdev: pointer to struct &media_device * * This function initializes the media device prior to its registration. * The media device initialization and registration is split in two functions * to avoid race conditions and make the media device available to user-space * before the media graph has been completed. * * So drivers need to first initialize the media device, register any entity * within the media device, create pad to pad links and then finally register * the media device by calling media_device_register() as a final step. * * The caller is responsible for initializing the media device before * registration. The following fields must be set: * * - dev must point to the parent device * - model must be filled with the device model name * * The bus_info field is set by media_device_init() for PCI and platform devices * if the field begins with '\0'. */ void media_device_init(struct media_device *mdev); /** * media_device_cleanup() - Cleanups a media device element * * @mdev: pointer to struct &media_device * * This function that will destroy the graph_mutex that is * initialized in media_device_init(). */ void media_device_cleanup(struct media_device *mdev); /** * __media_device_register() - Registers a media device element * * @mdev: pointer to struct &media_device * @owner: should be filled with %THIS_MODULE * * Users, should, instead, call the media_device_register() macro. * * The caller is responsible for initializing the &media_device structure * before registration. 
The following fields of &media_device must be set: * * - &media_device.model must be filled with the device model name as a * NUL-terminated UTF-8 string. The device/model revision must not be * stored in this field. * * The following fields are optional: * * - &media_device.serial is a unique serial number stored as a * NUL-terminated ASCII string. The field is big enough to store a GUID * in text form. If the hardware doesn't provide a unique serial number * this field must be left empty. * * - &media_device.bus_info represents the location of the device in the * system as a NUL-terminated ASCII string. For PCI/PCIe devices * &media_device.bus_info must be set to "PCI:" (or "PCIe:") followed by * the value of pci_name(). For USB devices,the usb_make_path() function * must be used. This field is used by applications to distinguish between * otherwise identical devices that don't provide a serial number. * * - &media_device.hw_revision is the hardware device revision in a * driver-specific format. When possible the revision should be formatted * with the KERNEL_VERSION() macro. * * .. note:: * * #) Upon successful registration a character device named media[0-9]+ is created. The device major and minor numbers are dynamic. The model name is exported as a sysfs attribute. * * #) Unregistering a media device that hasn't been registered is **NOT** safe. * * Return: returns zero on success or a negative error code. */ int __must_check __media_device_register(struct media_device *mdev, struct module *owner); /** * media_device_register() - Registers a media device element * * @mdev: pointer to struct &media_device * * This macro calls __media_device_register() passing %THIS_MODULE as * the __media_device_register() second argument (**owner**). */ #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE) /** * media_device_unregister() - Unregisters a media device element * * @mdev: pointer to struct &media_device * * It is safe to call this function on an unregistered (but initialised) * media device. */ void media_device_unregister(struct media_device *mdev); /** * media_device_register_entity() - registers a media entity inside a * previously registered media device. * * @mdev: pointer to struct &media_device * @entity: pointer to struct &media_entity to be registered * * Entities are identified by a unique positive integer ID. The media * controller framework will such ID automatically. IDs are not guaranteed * to be contiguous, and the ID number can change on newer Kernel versions. * So, neither the driver nor userspace should hardcode ID numbers to refer * to the entities, but, instead, use the framework to find the ID, when * needed. * * The media_entity name, type and flags fields should be initialized before * calling media_device_register_entity(). Entities embedded in higher-level * standard structures can have some of those fields set by the higher-level * framework. * * If the device has pads, media_entity_pads_init() should be called before * this function. Otherwise, the &media_entity.pad and &media_entity.num_pads * should be zeroed before calling this function. * * Entities have flags that describe the entity capabilities and state: * * %MEDIA_ENT_FL_DEFAULT * indicates the default entity for a given type. * This can be used to report the default audio and video devices or the * default camera sensor. * * .. note:: * * Drivers should set the entity function before calling this function. 
* Please notice that the values %MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN and * %MEDIA_ENT_F_UNKNOWN should not be used by the drivers. */ int __must_check media_device_register_entity(struct media_device *mdev, struct media_entity *entity); /** * media_device_unregister_entity() - unregisters a media entity. * * @entity: pointer to struct &media_entity to be unregistered * * All links associated with the entity and all PADs are automatically * unregistered from the media_device when this function is called. * * Unregistering an entity will not change the IDs of the other entities and * the previoully used ID will never be reused for a newly registered entities. * * When a media device is unregistered, all its entities are unregistered * automatically. No manual entities unregistration is then required. * * .. note:: * * The media_entity instance itself must be freed explicitly by * the driver if required. */ void media_device_unregister_entity(struct media_entity *entity); /** * media_device_register_entity_notify() - Registers a media entity_notify * callback * * @mdev: The media device * @nptr: The media_entity_notify * * .. note:: * * When a new entity is registered, all the registered * media_entity_notify callbacks are invoked. */ void media_device_register_entity_notify(struct media_device *mdev, struct media_entity_notify *nptr); /** * media_device_unregister_entity_notify() - Unregister a media entity notify * callback * * @mdev: The media device * @nptr: The media_entity_notify * */ void media_device_unregister_entity_notify(struct media_device *mdev, struct media_entity_notify *nptr); /* Iterate over all entities. */ #define media_device_for_each_entity(entity, mdev) \ list_for_each_entry(entity, &(mdev)->entities, graph_obj.list) /* Iterate over all interfaces. */ #define media_device_for_each_intf(intf, mdev) \ list_for_each_entry(intf, &(mdev)->interfaces, graph_obj.list) /* Iterate over all pads. */ #define media_device_for_each_pad(pad, mdev) \ list_for_each_entry(pad, &(mdev)->pads, graph_obj.list) /* Iterate over all links. */ #define media_device_for_each_link(link, mdev) \ list_for_each_entry(link, &(mdev)->links, graph_obj.list) /** * media_device_pci_init() - create and initialize a * struct &media_device from a PCI device. * * @mdev: pointer to struct &media_device * @pci_dev: pointer to struct pci_dev * @name: media device name. If %NULL, the routine will use the default * name for the pci device, given by pci_name() macro. */ void media_device_pci_init(struct media_device *mdev, struct pci_dev *pci_dev, const char *name); /** * __media_device_usb_init() - create and initialize a * struct &media_device from a PCI device. * * @mdev: pointer to struct &media_device * @udev: pointer to struct usb_device * @board_name: media device name. If %NULL, the routine will use the usb * product name, if available. * @driver_name: name of the driver. if %NULL, the routine will use the name * given by ``udev->dev->driver->name``, with is usually the wrong * thing to do. * * .. note:: * * It is better to call media_device_usb_init() instead, as * such macro fills driver_name with %KBUILD_MODNAME. 
*/ void __media_device_usb_init(struct media_device *mdev, struct usb_device *udev, const char *board_name, const char *driver_name); #else static inline void media_device_init(struct media_device *mdev) { } static inline int media_device_register(struct media_device *mdev) { return 0; } static inline void media_device_unregister(struct media_device *mdev) { } static inline void media_device_cleanup(struct media_device *mdev) { } static inline int media_device_register_entity(struct media_device *mdev, struct media_entity *entity) { return 0; } static inline void media_device_unregister_entity(struct media_entity *entity) { } static inline void media_device_register_entity_notify( struct media_device *mdev, struct media_entity_notify *nptr) { } static inline void media_device_unregister_entity_notify( struct media_device *mdev, struct media_entity_notify *nptr) { } static inline void media_device_pci_init(struct media_device *mdev, struct pci_dev *pci_dev, char *name) { } static inline void __media_device_usb_init(struct media_device *mdev, struct usb_device *udev, char *board_name, char *driver_name) { } #endif /* CONFIG_MEDIA_CONTROLLER */ /** * media_device_usb_init() - create and initialize a * struct &media_device from a USB device. * * @mdev: pointer to struct &media_device * @udev: pointer to struct usb_device * @name: media device name. If %NULL, the routine will use the USB * product name, if available. * * This macro calls __media_device_usb_init(), passing its * **driver_name** parameter filled with %KBUILD_MODNAME. */ #define media_device_usb_init(mdev, udev, name) \ __media_device_usb_init(mdev, udev, name, KBUILD_MODNAME) /** * media_set_bus_info() - Set bus_info field * * @bus_info: Variable where to write the bus info (char array) * @bus_info_size: Length of the bus_info * @dev: Related struct device * * Sets bus information based on &dev. This is currently done for PCI and * platform devices. dev is required to be non-NULL for this to happen. * * This function is not meant to be called from drivers. */ static inline void media_set_bus_info(char *bus_info, size_t bus_info_size, struct device *dev) { if (!dev) strscpy(bus_info, "no bus info", bus_info_size); else if (dev_is_platform(dev)) snprintf(bus_info, bus_info_size, "platform:%s", dev_name(dev)); else if (dev_is_pci(dev)) snprintf(bus_info, bus_info_size, "PCI:%s", dev_name(dev)); } #endif
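To make the registration sequence above concrete, here is a minimal sketch (not part of the header) of how a hypothetical USB capture driver might use this API. The foo_* names and the "foo capture board" string are placeholders, only one source entity is created, and error unwinding (media_device_unregister_entity(), media_device_cleanup()) is omitted for brevity.

#include <linux/usb.h>
#include <media/media-device.h>
#include <media/media-entity.h>

struct foo_dev {
	struct media_device mdev;
	struct media_entity sensor;
	struct media_pad sensor_pad;
};

/* Called from the hypothetical driver's probe() once the interface is bound. */
static int foo_media_setup(struct foo_dev *foo, struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret;

	/* Fills mdev->dev, model, serial and bus_info from the USB device and
	 * initialises mdev; driver_name comes from KBUILD_MODNAME via the
	 * media_device_usb_init() wrapper documented above. */
	media_device_usb_init(&foo->mdev, udev, "foo capture board");

	/* Pads and the entity function must be set before registering the entity. */
	foo->sensor.name = "foo sensor";
	foo->sensor.function = MEDIA_ENT_F_CAM_SENSOR;
	foo->sensor_pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(&foo->sensor, 1, &foo->sensor_pad);
	if (ret)
		return ret;

	ret = media_device_register_entity(&foo->mdev, &foo->sensor);
	if (ret)
		return ret;

	/* Creates the media[0-9]+ character device node. */
	return media_device_register(&foo->mdev);
}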
1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 // SPDX-License-Identifier: GPL-2.0-only /* * "security" table * * This is for use by Mandatory Access Control (MAC) security models, * which need to be able to manage security policy in separate context * to DAC. * * Based on iptable_mangle.c * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org> * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com> */ #include <linux/module.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/slab.h> #include <net/ip.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); MODULE_DESCRIPTION("iptables security table, for MAC rules"); #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \ (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT) static const struct xt_table security_table = { .name = "security", .valid_hooks = SECURITY_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, .priority = NF_IP_PRI_SECURITY, }; static struct nf_hook_ops *sectbl_ops __read_mostly; static int iptable_security_table_init(struct net *net) { struct ipt_replace *repl; int ret; repl = ipt_alloc_initial_table(&security_table); if (repl == NULL) return -ENOMEM; ret = ipt_register_table(net, &security_table, repl, sectbl_ops); kfree(repl); return ret; } static void __net_exit iptable_security_net_pre_exit(struct net *net) { ipt_unregister_table_pre_exit(net, "security"); } static void __net_exit iptable_security_net_exit(struct net *net) { ipt_unregister_table_exit(net, "security"); } static struct pernet_operations iptable_security_net_ops = { .pre_exit = iptable_security_net_pre_exit, .exit = iptable_security_net_exit, }; static int __init iptable_security_init(void) { int ret = xt_register_template(&security_table, iptable_security_table_init); if (ret < 0) return ret; sectbl_ops = xt_hook_ops_alloc(&security_table, ipt_do_table); if (IS_ERR(sectbl_ops)) { xt_unregister_template(&security_table); return PTR_ERR(sectbl_ops); } ret = register_pernet_subsys(&iptable_security_net_ops); if (ret < 0) { xt_unregister_template(&security_table); kfree(sectbl_ops); return ret; } return ret; } static void __exit iptable_security_fini(void) { unregister_pernet_subsys(&iptable_security_net_ops); kfree(sectbl_ops); xt_unregister_template(&security_table); } module_init(iptable_security_init); module_exit(iptable_security_fini);
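The template-plus-pernet registration pattern shown in iptable_security.c generalises to any additional iptables table. As a hedged illustration only, the sketch below wires up an imaginary "example" table with the same helpers used above (ipt_alloc_initial_table(), ipt_register_table(), and the pre_exit/exit split); the table name and priority are made up, it assumes the same includes as the file above, and the module init/exit would mirror iptable_security_init()/iptable_security_fini().

/* Hypothetical table definition following the "security" table above. */
static const struct xt_table example_table = {
	.name		= "example",			/* made-up table name */
	.valid_hooks	= (1 << NF_INET_LOCAL_IN) |
			  (1 << NF_INET_FORWARD) |
			  (1 << NF_INET_LOCAL_OUT),
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV4,
	.priority	= NF_IP_PRI_SECURITY + 1,	/* run just after "security" */
};

static struct nf_hook_ops *example_ops __read_mostly;

static int example_table_init(struct net *net)
{
	struct ipt_replace *repl;
	int ret;

	/* Build an empty initial ruleset and register it for this netns. */
	repl = ipt_alloc_initial_table(&example_table);
	if (!repl)
		return -ENOMEM;
	ret = ipt_register_table(net, &example_table, repl, example_ops);
	kfree(repl);
	return ret;
}

static void __net_exit example_net_pre_exit(struct net *net)
{
	/* Detach the hooks first so no packet can still reach the table... */
	ipt_unregister_table_pre_exit(net, "example");
}

static void __net_exit example_net_exit(struct net *net)
{
	/* ...then free the table itself. */
	ipt_unregister_table_exit(net, "example");
}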
1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. */ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/pkt_sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/if_bonding.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <net/arp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <asm/byteorder.h> #include <net/bonding.h> #include <net/bond_alb.h> static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }; static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; #pragma pack(1) struct learning_pkt { u8 mac_dst[ETH_ALEN]; u8 mac_src[ETH_ALEN]; __be16 type; u8 padding[ETH_ZLEN - ETH_HLEN]; }; struct arp_pkt { __be16 hw_addr_space; __be16 prot_addr_space; u8 hw_addr_len; u8 prot_addr_len; __be16 op_code; u8 mac_src[ETH_ALEN]; /* sender hardware address */ __be32 ip_src; /* sender IP address */ u8 mac_dst[ETH_ALEN]; /* target hardware address */ __be32 ip_dst; /* target IP address */ }; #pragma pack() /* Forward declaration */ static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[], bool strict_match); static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); static void rlb_src_unlink(struct bonding *bond, u32 index); static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash); static inline u8 _simple_hash(const u8 *hash_start, int hash_size) { int i; u8 hash = 0; for (i = 0; i < hash_size; i++) hash ^= hash_start[i]; return hash; } /*********************** tlb specific functions ***************************/ static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load) { if (save_load) { entry->load_history = 1 + entry->tx_bytes / BOND_TLB_REBALANCE_INTERVAL; entry->tx_bytes = 0; } entry->tx_slave = NULL; entry->next = TLB_NULL_INDEX; entry->prev = TLB_NULL_INDEX; } static inline void 
tlb_init_slave(struct slave *slave) { SLAVE_TLB_INFO(slave).load = 0; SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX; } static void __tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load) { struct tlb_client_info *tx_hash_table; u32 index; /* clear slave from tx_hashtbl */ tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl; /* skip this if we've already freed the tx hash table */ if (tx_hash_table) { index = SLAVE_TLB_INFO(slave).head; while (index != TLB_NULL_INDEX) { u32 next_index = tx_hash_table[index].next; tlb_init_table_entry(&tx_hash_table[index], save_load); index = next_index; } } tlb_init_slave(slave); } static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load) { spin_lock_bh(&bond->mode_lock); __tlb_clear_slave(bond, slave, save_load); spin_unlock_bh(&bond->mode_lock); } /* Must be called before starting the monitor timer */ static int tlb_initialize(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info); struct tlb_client_info *new_hashtbl; int i; new_hashtbl = kzalloc(size, GFP_KERNEL); if (!new_hashtbl) return -ENOMEM; spin_lock_bh(&bond->mode_lock); bond_info->tx_hashtbl = new_hashtbl; for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0); spin_unlock_bh(&bond->mode_lock); return 0; } /* Must be called only after all slaves have been released */ static void tlb_deinitialize(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); spin_lock_bh(&bond->mode_lock); kfree(bond_info->tx_hashtbl); bond_info->tx_hashtbl = NULL; spin_unlock_bh(&bond->mode_lock); } static long long compute_gap(struct slave *slave) { return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */ (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */ } static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) { struct slave *slave, *least_loaded; struct list_head *iter; long long max_gap; least_loaded = NULL; max_gap = LLONG_MIN; /* Find the slave with the largest gap */ bond_for_each_slave_rcu(bond, slave, iter) { if (bond_slave_can_tx(slave)) { long long gap = compute_gap(slave); if (max_gap < gap) { least_loaded = slave; max_gap = gap; } } } return least_loaded; } static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct tlb_client_info *hash_table; struct slave *assigned_slave; hash_table = bond_info->tx_hashtbl; assigned_slave = hash_table[hash_index].tx_slave; if (!assigned_slave) { assigned_slave = tlb_get_least_loaded_slave(bond); if (assigned_slave) { struct tlb_slave_info *slave_info = &(SLAVE_TLB_INFO(assigned_slave)); u32 next_index = slave_info->head; hash_table[hash_index].tx_slave = assigned_slave; hash_table[hash_index].next = next_index; hash_table[hash_index].prev = TLB_NULL_INDEX; if (next_index != TLB_NULL_INDEX) hash_table[next_index].prev = hash_index; slave_info->head = hash_index; slave_info->load += hash_table[hash_index].load_history; } } if (assigned_slave) hash_table[hash_index].tx_bytes += skb_len; return assigned_slave; } static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len) { struct slave *tx_slave; /* We don't need to disable softirq here, because * tlb_choose_channel() is only called by bond_alb_xmit() * which already has softirq disabled. 
*/ spin_lock(&bond->mode_lock); tx_slave = __tlb_choose_channel(bond, hash_index, skb_len); spin_unlock(&bond->mode_lock); return tx_slave; } /*********************** rlb specific functions ***************************/ /* when an ARP REPLY is received from a client update its info * in the rx_hashtbl */ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; u32 hash_index; spin_lock_bh(&bond->mode_lock); hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src)); client_info = &(bond_info->rx_hashtbl[hash_index]); if ((client_info->assigned) && (client_info->ip_src == arp->ip_dst) && (client_info->ip_dst == arp->ip_src) && (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) { /* update the clients MAC address */ ether_addr_copy(client_info->mac_dst, arp->mac_src); client_info->ntt = 1; bond_info->rx_ntt = 1; } spin_unlock_bh(&bond->mode_lock); } static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave) { struct arp_pkt *arp, _arp; if (skb->protocol != cpu_to_be16(ETH_P_ARP)) goto out; arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp); if (!arp) goto out; /* We received an ARP from arp->ip_src. * We might have used this IP address previously (on the bonding host * itself or on a system that is bridged together with the bond). * However, if arp->mac_src is different than what is stored in * rx_hashtbl, some other host is now using the IP and we must prevent * sending out client updates with this IP address and the old MAC * address. * Clean up all hash table entries that have this address as ip_src but * have a different mac_src. */ rlb_purge_src_ip(bond, arp); if (arp->op_code == htons(ARPOP_REPLY)) { /* update rx hash table for this ARP */ rlb_update_entry_from_arp(bond, arp); slave_dbg(bond->dev, slave->dev, "Server received an ARP Reply from client\n"); } out: return RX_HANDLER_ANOTHER; } /* Caller must hold rcu_read_lock() */ static struct slave *__rlb_next_rx_slave(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *before = NULL, *rx_slave = NULL, *slave; struct list_head *iter; bool found = false; bond_for_each_slave_rcu(bond, slave, iter) { if (!bond_slave_can_tx(slave)) continue; if (!found) { if (!before || before->speed < slave->speed) before = slave; } else { if (!rx_slave || rx_slave->speed < slave->speed) rx_slave = slave; } if (slave == bond_info->rx_slave) found = true; } /* we didn't find anything after the current or we have something * better before and up to the current slave */ if (!rx_slave || (before && rx_slave->speed < before->speed)) rx_slave = before; if (rx_slave) bond_info->rx_slave = rx_slave; return rx_slave; } /* Caller must hold RTNL, rcu_read_lock is obtained only to silence checkers */ static struct slave *rlb_next_rx_slave(struct bonding *bond) { struct slave *rx_slave; ASSERT_RTNL(); rcu_read_lock(); rx_slave = __rlb_next_rx_slave(bond); rcu_read_unlock(); return rx_slave; } /* teach the switch the mac of a disabled slave * on the primary for fault tolerance * * Caller must hold RTNL */ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, const u8 addr[]) { struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); if (!curr_active) return; if (!bond->alb_info.primary_is_promisc) { if (!dev_set_promiscuity(curr_active->dev, 1)) bond->alb_info.primary_is_promisc = 1; else 
bond->alb_info.primary_is_promisc = 0; } bond->alb_info.rlb_promisc_timeout_counter = 0; alb_send_learning_packets(curr_active, addr, true); } /* slave being removed should not be active at this point * * Caller must hold rtnl. */ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *rx_hash_table; u32 index, next_index; /* clear slave from rx_hashtbl */ spin_lock_bh(&bond->mode_lock); rx_hash_table = bond_info->rx_hashtbl; index = bond_info->rx_hashtbl_used_head; for (; index != RLB_NULL_INDEX; index = next_index) { next_index = rx_hash_table[index].used_next; if (rx_hash_table[index].slave == slave) { struct slave *assigned_slave = rlb_next_rx_slave(bond); if (assigned_slave) { rx_hash_table[index].slave = assigned_slave; if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) { bond_info->rx_hashtbl[index].ntt = 1; bond_info->rx_ntt = 1; /* A slave has been removed from the * table because it is either disabled * or being released. We must retry the * update to avoid clients from not * being updated & disconnecting when * there is stress */ bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY; } } else { /* there is no active slave */ rx_hash_table[index].slave = NULL; } } } spin_unlock_bh(&bond->mode_lock); if (slave != rtnl_dereference(bond->curr_active_slave)) rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr); } static void rlb_update_client(struct rlb_client_info *client_info) { int i; if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst)) return; for (i = 0; i < RLB_ARP_BURST_SIZE; i++) { struct sk_buff *skb; skb = arp_create(ARPOP_REPLY, ETH_P_ARP, client_info->ip_dst, client_info->slave->dev, client_info->ip_src, client_info->mac_dst, client_info->slave->dev->dev_addr, client_info->mac_dst); if (!skb) { slave_err(client_info->slave->bond->dev, client_info->slave->dev, "failed to create an ARP packet\n"); continue; } skb->dev = client_info->slave->dev; if (client_info->vlan_id) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id); } arp_xmit(skb); } } /* sends ARP REPLIES that update the clients that need updating */ static void rlb_update_rx_clients(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; u32 hash_index; spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->used_next) { client_info = &(bond_info->rx_hashtbl[hash_index]); if (client_info->ntt) { rlb_update_client(client_info); if (bond_info->rlb_update_retry_counter == 0) client_info->ntt = 0; } } /* do not update the entries again until this counter is zero so that * not to confuse the clients. 
*/ bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY; spin_unlock_bh(&bond->mode_lock); } /* The slave was assigned a new mac address - update the clients */ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; int ntt = 0; u32 hash_index; spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->used_next) { client_info = &(bond_info->rx_hashtbl[hash_index]); if ((client_info->slave == slave) && is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; ntt = 1; } } /* update the team's flag only after the whole iteration */ if (ntt) { bond_info->rx_ntt = 1; /* fasten the change */ bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY; } spin_unlock_bh(&bond->mode_lock); } /* mark all clients using src_ip to be updated */ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; u32 hash_index; spin_lock(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->used_next) { client_info = &(bond_info->rx_hashtbl[hash_index]); if (!client_info->slave) { netdev_err(bond->dev, "found a client with no channel in the client's hash table\n"); continue; } /* update all clients using this src_ip, that are not assigned * to the team's address (curr_active_slave) and have a known * unicast mac address. */ if ((client_info->ip_src == src_ip) && !ether_addr_equal_64bits(client_info->slave->dev->dev_addr, bond->dev->dev_addr) && is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; bond_info->rx_ntt = 1; } } spin_unlock(&bond->mode_lock); } static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond, const struct arp_pkt *arp) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *assigned_slave, *curr_active_slave; struct rlb_client_info *client_info; u32 hash_index = 0; spin_lock(&bond->mode_lock); curr_active_slave = rcu_dereference(bond->curr_active_slave); hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst)); client_info = &(bond_info->rx_hashtbl[hash_index]); if (client_info->assigned) { if ((client_info->ip_src == arp->ip_src) && (client_info->ip_dst == arp->ip_dst)) { /* the entry is already assigned to this client */ if (!is_broadcast_ether_addr(arp->mac_dst)) { /* update mac address from arp */ ether_addr_copy(client_info->mac_dst, arp->mac_dst); } ether_addr_copy(client_info->mac_src, arp->mac_src); assigned_slave = client_info->slave; if (assigned_slave) { spin_unlock(&bond->mode_lock); return assigned_slave; } } else { /* the entry is already assigned to some other client, * move the old client to primary (curr_active_slave) so * that the new client can be assigned to this entry. 
*/ if (curr_active_slave && client_info->slave != curr_active_slave) { client_info->slave = curr_active_slave; rlb_update_client(client_info); } } } /* assign a new slave */ assigned_slave = __rlb_next_rx_slave(bond); if (assigned_slave) { if (!(client_info->assigned && client_info->ip_src == arp->ip_src)) { /* ip_src is going to be updated, * fix the src hash list */ u32 hash_src = _simple_hash((u8 *)&arp->ip_src, sizeof(arp->ip_src)); rlb_src_unlink(bond, hash_index); rlb_src_link(bond, hash_src, hash_index); } client_info->ip_src = arp->ip_src; client_info->ip_dst = arp->ip_dst; /* arp->mac_dst is broadcast for arp requests. * will be updated with clients actual unicast mac address * upon receiving an arp reply. */ ether_addr_copy(client_info->mac_dst, arp->mac_dst); ether_addr_copy(client_info->mac_src, arp->mac_src); client_info->slave = assigned_slave; if (is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; bond->alb_info.rx_ntt = 1; } else { client_info->ntt = 0; } if (vlan_get_tag(skb, &client_info->vlan_id)) client_info->vlan_id = 0; if (!client_info->assigned) { u32 prev_tbl_head = bond_info->rx_hashtbl_used_head; bond_info->rx_hashtbl_used_head = hash_index; client_info->used_next = prev_tbl_head; if (prev_tbl_head != RLB_NULL_INDEX) { bond_info->rx_hashtbl[prev_tbl_head].used_prev = hash_index; } client_info->assigned = 1; } } spin_unlock(&bond->mode_lock); return assigned_slave; } /* chooses (and returns) transmit channel for arp reply * does not choose channel for other arp types since they are * sent on the curr_active_slave */ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) { struct slave *tx_slave = NULL; struct net_device *dev; struct arp_pkt *arp; if (!pskb_network_may_pull(skb, sizeof(*arp))) return NULL; arp = (struct arp_pkt *)skb_network_header(skb); /* Don't modify or load balance ARPs that do not originate * from the bond itself or a VLAN directly above the bond. */ if (!bond_slave_has_mac_rcu(bond, arp->mac_src)) return NULL; dev = ip_dev_find(dev_net(bond->dev), arp->ip_src); if (dev) { if (netif_is_any_bridge_master(dev)) { dev_put(dev); return NULL; } dev_put(dev); } if (arp->op_code == htons(ARPOP_REPLY)) { /* the arp must be sent on the selected rx channel */ tx_slave = rlb_choose_channel(skb, bond, arp); if (tx_slave) bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr, tx_slave->dev->addr_len); netdev_dbg(bond->dev, "(slave %s): Server sent ARP Reply packet\n", tx_slave ? tx_slave->dev->name : "NULL"); } else if (arp->op_code == htons(ARPOP_REQUEST)) { /* Create an entry in the rx_hashtbl for this client as a * place holder. * When the arp reply is received the entry will be updated * with the correct unicast address of the client. */ tx_slave = rlb_choose_channel(skb, bond, arp); /* The ARP reply packets must be delayed so that * they can cancel out the influence of the ARP request. */ bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY; /* arp requests are broadcast and are sent on the primary * the arp request will collapse all clients on the subnet to * the primary slave. We must register these clients to be * updated with their assigned mac. */ rlb_req_update_subnet_clients(bond, arp->ip_src); netdev_dbg(bond->dev, "(slave %s): Server sent ARP Request packet\n", tx_slave ? 
tx_slave->dev->name : "NULL"); } return tx_slave; } static void rlb_rebalance(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *assigned_slave; struct rlb_client_info *client_info; int ntt; u32 hash_index; spin_lock_bh(&bond->mode_lock); ntt = 0; hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->used_next) { client_info = &(bond_info->rx_hashtbl[hash_index]); assigned_slave = __rlb_next_rx_slave(bond); if (assigned_slave && (client_info->slave != assigned_slave)) { client_info->slave = assigned_slave; if (!is_zero_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; ntt = 1; } } } /* update the team's flag only after the whole iteration */ if (ntt) bond_info->rx_ntt = 1; spin_unlock_bh(&bond->mode_lock); } /* Caller must hold mode_lock */ static void rlb_init_table_entry_dst(struct rlb_client_info *entry) { entry->used_next = RLB_NULL_INDEX; entry->used_prev = RLB_NULL_INDEX; entry->assigned = 0; entry->slave = NULL; entry->vlan_id = 0; } static void rlb_init_table_entry_src(struct rlb_client_info *entry) { entry->src_first = RLB_NULL_INDEX; entry->src_prev = RLB_NULL_INDEX; entry->src_next = RLB_NULL_INDEX; } static void rlb_init_table_entry(struct rlb_client_info *entry) { memset(entry, 0, sizeof(struct rlb_client_info)); rlb_init_table_entry_dst(entry); rlb_init_table_entry_src(entry); } static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 next_index = bond_info->rx_hashtbl[index].used_next; u32 prev_index = bond_info->rx_hashtbl[index].used_prev; if (index == bond_info->rx_hashtbl_used_head) bond_info->rx_hashtbl_used_head = next_index; if (prev_index != RLB_NULL_INDEX) bond_info->rx_hashtbl[prev_index].used_next = next_index; if (next_index != RLB_NULL_INDEX) bond_info->rx_hashtbl[next_index].used_prev = prev_index; } /* unlink a rlb hash table entry from the src list */ static void rlb_src_unlink(struct bonding *bond, u32 index) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 next_index = bond_info->rx_hashtbl[index].src_next; u32 prev_index = bond_info->rx_hashtbl[index].src_prev; bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX; bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX; if (next_index != RLB_NULL_INDEX) bond_info->rx_hashtbl[next_index].src_prev = prev_index; if (prev_index == RLB_NULL_INDEX) return; /* is prev_index pointing to the head of this list? 
*/ if (bond_info->rx_hashtbl[prev_index].src_first == index) bond_info->rx_hashtbl[prev_index].src_first = next_index; else bond_info->rx_hashtbl[prev_index].src_next = next_index; } static void rlb_delete_table_entry(struct bonding *bond, u32 index) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]); rlb_delete_table_entry_dst(bond, index); rlb_init_table_entry_dst(entry); rlb_src_unlink(bond, index); } /* add the rx_hashtbl[ip_dst_hash] entry to the list * of entries with identical ip_src_hash */ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 next; bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash; next = bond_info->rx_hashtbl[ip_src_hash].src_first; bond_info->rx_hashtbl[ip_dst_hash].src_next = next; if (next != RLB_NULL_INDEX) bond_info->rx_hashtbl[next].src_prev = ip_dst_hash; bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash; } /* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does * not match arp->mac_src */ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src)); u32 index; spin_lock_bh(&bond->mode_lock); index = bond_info->rx_hashtbl[ip_src_hash].src_first; while (index != RLB_NULL_INDEX) { struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]); u32 next_index = entry->src_next; if (entry->ip_src == arp->ip_src && !ether_addr_equal_64bits(arp->mac_src, entry->mac_src)) rlb_delete_table_entry(bond, index); index = next_index; } spin_unlock_bh(&bond->mode_lock); } static int rlb_initialize(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *new_hashtbl; int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info); int i; new_hashtbl = kmalloc(size, GFP_KERNEL); if (!new_hashtbl) return -1; spin_lock_bh(&bond->mode_lock); bond_info->rx_hashtbl = new_hashtbl; bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) rlb_init_table_entry(bond_info->rx_hashtbl + i); spin_unlock_bh(&bond->mode_lock); /* register to receive ARPs */ bond->recv_probe = rlb_arp_recv; return 0; } static void rlb_deinitialize(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); spin_lock_bh(&bond->mode_lock); kfree(bond_info->rx_hashtbl); bond_info->rx_hashtbl = NULL; bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; spin_unlock_bh(&bond->mode_lock); } static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 curr_index; spin_lock_bh(&bond->mode_lock); curr_index = bond_info->rx_hashtbl_used_head; while (curr_index != RLB_NULL_INDEX) { struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]); u32 next_index = bond_info->rx_hashtbl[curr_index].used_next; if (curr->vlan_id == vlan_id) rlb_delete_table_entry(bond, curr_index); curr_index = next_index; } spin_unlock_bh(&bond->mode_lock); } /*********************** tlb/rlb shared functions *********************/ static void alb_send_lp_vid(struct slave *slave, const u8 mac_addr[], __be16 vlan_proto, u16 vid) { struct learning_pkt pkt; struct sk_buff *skb; int size = sizeof(struct learning_pkt); memset(&pkt, 0, size); ether_addr_copy(pkt.mac_dst, mac_addr); ether_addr_copy(pkt.mac_src, mac_addr); 
pkt.type = cpu_to_be16(ETH_P_LOOPBACK); skb = dev_alloc_skb(size); if (!skb) return; skb_put_data(skb, &pkt, size); skb_reset_mac_header(skb); skb->network_header = skb->mac_header + ETH_HLEN; skb->protocol = pkt.type; skb->priority = TC_PRIO_CONTROL; skb->dev = slave->dev; slave_dbg(slave->bond->dev, slave->dev, "Send learning packet: mac %pM vlan %d\n", mac_addr, vid); if (vid) __vlan_hwaccel_put_tag(skb, vlan_proto, vid); dev_queue_xmit(skb); } struct alb_walk_data { struct bonding *bond; struct slave *slave; const u8 *mac_addr; bool strict_match; }; static int alb_upper_dev_walk(struct net_device *upper, struct netdev_nested_priv *priv) { struct alb_walk_data *data = (struct alb_walk_data *)priv->data; bool strict_match = data->strict_match; const u8 *mac_addr = data->mac_addr; struct bonding *bond = data->bond; struct slave *slave = data->slave; struct bond_vlan_tag *tags; if (is_vlan_dev(upper) && bond->dev->lower_level == upper->lower_level - 1) { if (upper->addr_assign_type == NET_ADDR_STOLEN) { alb_send_lp_vid(slave, mac_addr, vlan_dev_vlan_proto(upper), vlan_dev_vlan_id(upper)); } else { alb_send_lp_vid(slave, upper->dev_addr, vlan_dev_vlan_proto(upper), vlan_dev_vlan_id(upper)); } } /* If this is a macvlan device, then only send updates * when strict_match is turned off. */ if (netif_is_macvlan(upper) && !strict_match) { tags = bond_verify_device_path(bond->dev, upper, 0); if (IS_ERR_OR_NULL(tags)) return -ENOMEM; alb_send_lp_vid(slave, upper->dev_addr, tags[0].vlan_proto, tags[0].vlan_id); kfree(tags); } return 0; } static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[], bool strict_match) { struct bonding *bond = bond_get_bond_by_slave(slave); struct netdev_nested_priv priv; struct alb_walk_data data = { .strict_match = strict_match, .mac_addr = mac_addr, .slave = slave, .bond = bond, }; priv.data = (void *)&data; /* send untagged */ alb_send_lp_vid(slave, mac_addr, 0, 0); /* loop through all devices and see if we need to send a packet * for that device. */ rcu_read_lock(); netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &priv); rcu_read_unlock(); } static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[], unsigned int len) { struct net_device *dev = slave->dev; struct sockaddr_storage ss; if (BOND_MODE(slave->bond) == BOND_MODE_TLB) { __dev_addr_set(dev, addr, len); return 0; } /* for rlb each slave must have a unique hw mac addresses so that * each slave will receive packets destined to a different mac */ memcpy(ss.__data, addr, len); ss.ss_family = dev->type; if (dev_set_mac_address(dev, &ss, NULL)) { slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n"); return -EOPNOTSUPP; } return 0; } /* Swap MAC addresses between two slaves. * * Called with RTNL held, and no other locks. */ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2) { u8 tmp_mac_addr[MAX_ADDR_LEN]; bond_hw_addr_copy(tmp_mac_addr, slave1->dev->dev_addr, slave1->dev->addr_len); alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, slave2->dev->addr_len); alb_set_slave_mac_addr(slave2, tmp_mac_addr, slave1->dev->addr_len); } /* Send learning packets after MAC address swap. 
* * Called with RTNL and no other locks */ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, struct slave *slave2) { int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2)); struct slave *disabled_slave = NULL; ASSERT_RTNL(); /* fasten the change in the switch */ if (bond_slave_can_tx(slave1)) { alb_send_learning_packets(slave1, slave1->dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform the clients that the mac address * has changed */ rlb_req_update_slave_clients(bond, slave1); } } else { disabled_slave = slave1; } if (bond_slave_can_tx(slave2)) { alb_send_learning_packets(slave2, slave2->dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform the clients that the mac address * has changed */ rlb_req_update_slave_clients(bond, slave2); } } else { disabled_slave = slave2; } if (bond->alb_info.rlb_enabled && slaves_state_differ) { /* A disabled slave was assigned an active mac addr */ rlb_teach_disabled_mac_on_primary(bond, disabled_slave->dev->dev_addr); } } /** * alb_change_hw_addr_on_detach * @bond: bonding we're working on * @slave: the slave that was just detached * * We assume that @slave was already detached from the slave list. * * If @slave's permanent hw address is different both from its current * address and from @bond's address, then somewhere in the bond there's * a slave that has @slave's permanet address as its current address. * We'll make sure that slave no longer uses @slave's permanent address. * * Caller must hold RTNL and no other locks */ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave) { int perm_curr_diff; int perm_bond_diff; struct slave *found_slave; perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr, slave->dev->dev_addr); perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr); if (perm_curr_diff && perm_bond_diff) { found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr); if (found_slave) { alb_swap_mac_addr(slave, found_slave); alb_fasten_mac_swap(bond, slave, found_slave); } } } /** * alb_handle_addr_collision_on_attach * @bond: bonding we're working on * @slave: the slave that was just attached * * checks uniqueness of slave's mac address and handles the case the * new slave uses the bonds mac address. * * If the permanent hw address of @slave is @bond's hw address, we need to * find a different hw address to give @slave, that isn't in use by any other * slave in the bond. This address must be, of course, one of the permanent * addresses of the other slaves. * * We go over the slave list, and for each slave there we compare its * permanent hw address with the current address of all the other slaves. * If no match was found, then we've found a slave with a permanent address * that isn't used by any other slave in the bond, so we can assign it to * @slave. * * assumption: this function is called before @slave is attached to the * bond slave list. */ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave) { struct slave *has_bond_addr = rcu_access_pointer(bond->curr_active_slave); struct slave *tmp_slave1, *free_mac_slave = NULL; struct list_head *iter; if (!bond_has_slaves(bond)) { /* this is the first slave */ return 0; } /* if slave's mac address differs from bond's mac address * check uniqueness of slave's mac address against the other * slaves in the bond. 
*/ if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) { if (!bond_slave_has_mac(bond, slave->dev->dev_addr)) return 0; /* Try setting slave mac to bond address and fall-through * to code handling that situation below... */ alb_set_slave_mac_addr(slave, bond->dev->dev_addr, bond->dev->addr_len); } /* The slave's address is equal to the address of the bond. * Search for a spare address in the bond for this slave. */ bond_for_each_slave(bond, tmp_slave1, iter) { if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) { /* no slave has tmp_slave1's perm addr * as its curr addr */ free_mac_slave = tmp_slave1; break; } if (!has_bond_addr) { if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr, bond->dev->dev_addr)) { has_bond_addr = tmp_slave1; } } } if (free_mac_slave) { alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr, free_mac_slave->dev->addr_len); slave_warn(bond->dev, slave->dev, "the slave hw address is in use by the bond; giving it the hw address of %s\n", free_mac_slave->dev->name); } else if (has_bond_addr) { slave_err(bond->dev, slave->dev, "the slave hw address is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n"); return -EFAULT; } return 0; } /** * alb_set_mac_address * @bond: bonding we're working on * @addr: MAC address to set * * In TLB mode all slaves are configured to the bond's hw address, but set * their dev_addr field to different addresses (based on their permanent hw * addresses). * * For each slave, this function sets the interface to the new address and then * changes its dev_addr field to its previous value. * * Unwinding assumes bond's mac address has not yet changed. */ static int alb_set_mac_address(struct bonding *bond, void *addr) { struct slave *slave, *rollback_slave; struct list_head *iter; struct sockaddr_storage ss; char tmp_addr[MAX_ADDR_LEN]; int res; if (bond->alb_info.rlb_enabled) return 0; bond_for_each_slave(bond, slave, iter) { /* save net_device's current hw address */ bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr, slave->dev->addr_len); res = dev_set_mac_address(slave->dev, addr, NULL); /* restore net_device's hw address */ dev_addr_set(slave->dev, tmp_addr); if (res) goto unwind; } return 0; unwind: memcpy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len); ss.ss_family = bond->dev->type; /* unwind from head to the slave that failed */ bond_for_each_slave(bond, rollback_slave, iter) { if (rollback_slave == slave) break; bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr, rollback_slave->dev->addr_len); dev_set_mac_address(rollback_slave->dev, &ss, NULL); dev_addr_set(rollback_slave->dev, tmp_addr); } return res; } /* determine if the packet is NA or NS */ static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond) { struct ipv6hdr *ip6hdr; struct icmp6hdr *hdr; if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) return true; ip6hdr = ipv6_hdr(skb); if (ip6hdr->nexthdr != IPPROTO_ICMPV6) return false; if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr))) return true; hdr = icmp6_hdr(skb); return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT || hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION; } /************************ exported alb functions ************************/ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) { int res; res = tlb_initialize(bond); if (res) return res; if (rlb_enabled) { res = rlb_initialize(bond); if (res) { tlb_deinitialize(bond); return res; } bond->alb_info.rlb_enabled = 1; } else { 
bond->alb_info.rlb_enabled = 0; } return 0; } void bond_alb_deinitialize(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); tlb_deinitialize(bond); if (bond_info->rlb_enabled) rlb_deinitialize(bond); } static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, struct slave *tx_slave) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct ethhdr *eth_data = eth_hdr(skb); if (!tx_slave) { /* unbalanced or unassigned, send through primary */ tx_slave = rcu_dereference(bond->curr_active_slave); if (bond->params.tlb_dynamic_lb) bond_info->unbalanced_load += skb->len; } if (tx_slave && bond_slave_can_tx(tx_slave)) { if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) { ether_addr_copy(eth_data->h_source, tx_slave->dev->dev_addr); } return bond_dev_queue_xmit(bond, skb, tx_slave->dev); } if (tx_slave && bond->params.tlb_dynamic_lb) { spin_lock(&bond->mode_lock); __tlb_clear_slave(bond, tx_slave, 0); spin_unlock(&bond->mode_lock); } /* no suitable interface, frame not sent */ return bond_tx_drop(bond->dev, skb); } struct slave *bond_xmit_tlb_slave_get(struct bonding *bond, struct sk_buff *skb) { struct slave *tx_slave = NULL; struct ethhdr *eth_data; u32 hash_index; skb_reset_mac_header(skb); eth_data = eth_hdr(skb); /* Do not TX balance any multicast or broadcast */ if (!is_multicast_ether_addr(eth_data->h_dest)) { switch (skb->protocol) { case htons(ETH_P_IPV6): if (alb_determine_nd(skb, bond)) break; fallthrough; case htons(ETH_P_IP): hash_index = bond_xmit_hash(bond, skb); if (bond->params.tlb_dynamic_lb) { tx_slave = tlb_choose_channel(bond, hash_index & 0xFF, skb->len); } else { struct bond_up_slave *slaves; unsigned int count; slaves = rcu_dereference(bond->usable_slaves); count = slaves ? READ_ONCE(slaves->count) : 0; if (likely(count)) tx_slave = slaves->arr[hash_index % count]; } break; } } return tx_slave; } netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *tx_slave; tx_slave = bond_xmit_tlb_slave_get(bond, skb); return bond_do_alb_xmit(skb, bond, tx_slave); } struct slave *bond_xmit_alb_slave_get(struct bonding *bond, struct sk_buff *skb) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); static const __be32 ip_bcast = htonl(0xffffffff); struct slave *tx_slave = NULL; const u8 *hash_start = NULL; bool do_tx_balance = true; struct ethhdr *eth_data; u32 hash_index = 0; int hash_size = 0; skb_reset_mac_header(skb); eth_data = eth_hdr(skb); switch (ntohs(skb->protocol)) { case ETH_P_IP: { const struct iphdr *iph; if (is_broadcast_ether_addr(eth_data->h_dest) || !pskb_network_may_pull(skb, sizeof(*iph))) { do_tx_balance = false; break; } iph = ip_hdr(skb); if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) { do_tx_balance = false; break; } hash_start = (char *)&(iph->daddr); hash_size = sizeof(iph->daddr); break; } case ETH_P_IPV6: { const struct ipv6hdr *ip6hdr; /* IPv6 doesn't really use broadcast mac address, but leave * that here just in case. */ if (is_broadcast_ether_addr(eth_data->h_dest)) { do_tx_balance = false; break; } /* IPv6 uses all-nodes multicast as an equivalent to * broadcasts in IPv4. 
*/ if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) { do_tx_balance = false; break; } if (alb_determine_nd(skb, bond)) { do_tx_balance = false; break; } /* The IPv6 header is pulled by alb_determine_nd */ /* Additionally, DAD probes should not be tx-balanced as that * will lead to false positives for duplicate addresses and * prevent address configuration from working. */ ip6hdr = ipv6_hdr(skb); if (ipv6_addr_any(&ip6hdr->saddr)) { do_tx_balance = false; break; } hash_start = (char *)&ip6hdr->daddr; hash_size = sizeof(ip6hdr->daddr); break; } case ETH_P_ARP: do_tx_balance = false; if (bond_info->rlb_enabled) tx_slave = rlb_arp_xmit(skb, bond); break; default: do_tx_balance = false; break; } if (do_tx_balance) { if (bond->params.tlb_dynamic_lb) { hash_index = _simple_hash(hash_start, hash_size); tx_slave = tlb_choose_channel(bond, hash_index, skb->len); } else { /* * do_tx_balance means we are free to select the tx_slave * So we do exactly what tlb would do for hash selection */ struct bond_up_slave *slaves; unsigned int count; slaves = rcu_dereference(bond->usable_slaves); count = slaves ? READ_ONCE(slaves->count) : 0; if (likely(count)) tx_slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; } } return tx_slave; } netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *tx_slave = NULL; tx_slave = bond_xmit_alb_slave_get(bond, skb); return bond_do_alb_xmit(skb, bond, tx_slave); } void bond_alb_monitor(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, alb_work.work); struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct list_head *iter; struct slave *slave; if (!bond_has_slaves(bond)) { atomic_set(&bond_info->tx_rebalance_counter, 0); bond_info->lp_counter = 0; goto re_arm; } rcu_read_lock(); atomic_inc(&bond_info->tx_rebalance_counter); bond_info->lp_counter++; /* send learning packets */ if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { bool strict_match; bond_for_each_slave_rcu(bond, slave, iter) { /* If updating current_active, use all currently * user mac addresses (!strict_match). Otherwise, only * use mac of the slave device. * In RLB mode, we always use strict matches. */ strict_match = (slave != rcu_access_pointer(bond->curr_active_slave) || bond_info->rlb_enabled); alb_send_learning_packets(slave, slave->dev->dev_addr, strict_match); } bond_info->lp_counter = 0; } /* rebalance tx traffic */ if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) { bond_for_each_slave_rcu(bond, slave, iter) { tlb_clear_slave(bond, slave, 1); if (slave == rcu_access_pointer(bond->curr_active_slave)) { SLAVE_TLB_INFO(slave).load = bond_info->unbalanced_load / BOND_TLB_REBALANCE_INTERVAL; bond_info->unbalanced_load = 0; } } atomic_set(&bond_info->tx_rebalance_counter, 0); } if (bond_info->rlb_enabled) { if (bond_info->primary_is_promisc && (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) { /* dev_set_promiscuity requires rtnl and * nothing else. Avoid race with bond_close. */ rcu_read_unlock(); if (!rtnl_trylock()) goto re_arm; bond_info->rlb_promisc_timeout_counter = 0; /* If the primary was set to promiscuous mode * because a slave was disabled then * it can now leave promiscuous mode. 
*/ dev_set_promiscuity(rtnl_dereference(bond->curr_active_slave)->dev, -1); bond_info->primary_is_promisc = 0; rtnl_unlock(); rcu_read_lock(); } if (bond_info->rlb_rebalance) { bond_info->rlb_rebalance = 0; rlb_rebalance(bond); } /* check if clients need updating */ if (bond_info->rx_ntt) { if (bond_info->rlb_update_delay_counter) { --bond_info->rlb_update_delay_counter; } else { rlb_update_rx_clients(bond); if (bond_info->rlb_update_retry_counter) --bond_info->rlb_update_retry_counter; else bond_info->rx_ntt = 0; } } } rcu_read_unlock(); re_arm: queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); } /* assumption: called before the slave is attached to the bond * and not locked by the bond lock */ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) { int res; res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr, slave->dev->addr_len); if (res) return res; res = alb_handle_addr_collision_on_attach(bond, slave); if (res) return res; tlb_init_slave(slave); /* order a rebalance ASAP */ atomic_set(&bond->alb_info.tx_rebalance_counter, BOND_TLB_REBALANCE_TICKS); if (bond->alb_info.rlb_enabled) bond->alb_info.rlb_rebalance = 1; return 0; } /* Remove slave from tlb and rlb hash tables, and fix up MAC addresses * if necessary. * * Caller must hold RTNL and no other locks */ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave) { if (bond_has_slaves(bond)) alb_change_hw_addr_on_detach(bond, slave); tlb_clear_slave(bond, slave, 0); if (bond->alb_info.rlb_enabled) { bond->alb_info.rx_slave = NULL; rlb_clear_slave(bond, slave); } } void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); if (link == BOND_LINK_DOWN) { tlb_clear_slave(bond, slave, 0); if (bond->alb_info.rlb_enabled) rlb_clear_slave(bond, slave); } else if (link == BOND_LINK_UP) { /* order a rebalance ASAP */ atomic_set(&bond_info->tx_rebalance_counter, BOND_TLB_REBALANCE_TICKS); if (bond->alb_info.rlb_enabled) { bond->alb_info.rlb_rebalance = 1; /* If the updelay module parameter is smaller than the * forwarding delay of the switch the rebalance will * not work because the rebalance arp replies will * not be forwarded to the clients.. */ } } if (bond_is_nondyn_tlb(bond)) { if (bond_update_slave_arr(bond, NULL)) pr_err("Failed to build slave-array for TLB mode.\n"); } } /** * bond_alb_handle_active_change - assign new curr_active_slave * @bond: our bonding struct * @new_slave: new slave to assign * * Set the bond->curr_active_slave to @new_slave and handle * mac address swapping and promiscuity changes as needed. * * Caller must hold RTNL */ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave) { struct slave *swap_slave; struct slave *curr_active; curr_active = rtnl_dereference(bond->curr_active_slave); if (curr_active == new_slave) return; if (curr_active && bond->alb_info.primary_is_promisc) { dev_set_promiscuity(curr_active->dev, -1); bond->alb_info.primary_is_promisc = 0; bond->alb_info.rlb_promisc_timeout_counter = 0; } swap_slave = curr_active; rcu_assign_pointer(bond->curr_active_slave, new_slave); if (!new_slave || !bond_has_slaves(bond)) return; /* set the new curr_active_slave to the bonds mac address * i.e. 
swap mac addresses of old curr_active_slave and new curr_active_slave */ if (!swap_slave) swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr); /* Arrange for swap_slave and new_slave to temporarily be * ignored so we can mess with their MAC addresses without * fear of interference from transmit activity. */ if (swap_slave) tlb_clear_slave(bond, swap_slave, 1); tlb_clear_slave(bond, new_slave, 1); /* in TLB mode, the slave might flip down/up with the old dev_addr, * and thus filter bond->dev_addr's packets, so force bond's mac */ if (BOND_MODE(bond) == BOND_MODE_TLB) { struct sockaddr_storage ss; u8 tmp_addr[MAX_ADDR_LEN]; bond_hw_addr_copy(tmp_addr, new_slave->dev->dev_addr, new_slave->dev->addr_len); bond_hw_addr_copy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len); ss.ss_family = bond->dev->type; /* we don't care if it can't change its mac, best effort */ dev_set_mac_address(new_slave->dev, &ss, NULL); dev_addr_set(new_slave->dev, tmp_addr); } /* curr_active_slave must be set before calling alb_swap_mac_addr */ if (swap_slave) { /* swap mac address */ alb_swap_mac_addr(swap_slave, new_slave); alb_fasten_mac_swap(bond, swap_slave, new_slave); } else { /* set the new_slave to the bond mac address */ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr, bond->dev->addr_len); alb_send_learning_packets(new_slave, bond->dev->dev_addr, false); } } /* Called with RTNL */ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) { struct bonding *bond = netdev_priv(bond_dev); struct sockaddr_storage *ss = addr; struct slave *curr_active; struct slave *swap_slave; int res; if (!is_valid_ether_addr(ss->__data)) return -EADDRNOTAVAIL; res = alb_set_mac_address(bond, addr); if (res) return res; dev_addr_set(bond_dev, ss->__data); /* If there is no curr_active_slave there is nothing else to do. * Otherwise we'll need to pass the new address to it and handle * duplications. */ curr_active = rtnl_dereference(bond->curr_active_slave); if (!curr_active) return 0; swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr); if (swap_slave) { alb_swap_mac_addr(swap_slave, curr_active); alb_fasten_mac_swap(bond, swap_slave, curr_active); } else { alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr, bond_dev->addr_len); alb_send_learning_packets(curr_active, bond_dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform clients mac address has changed */ rlb_req_update_slave_clients(bond, curr_active); } } return 0; } void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id) { if (bond->alb_info.rlb_enabled) rlb_clear_vlan(bond, vlan_id); }
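/*
 * Illustrative sketch (not part of this file): when tlb_dynamic_lb is off,
 * bond_xmit_alb_slave_get() above falls back to a plain modulo pick over
 * the usable-slave array, so every packet of a given flow keeps hitting
 * the same slave between rebalances. Reduced to its essentials, assuming
 * the flow hash and slave count are already known:
 *
 *	static unsigned int alb_pick_slave_idx(u32 flow_hash, unsigned int count)
 *	{
 *		return count ? flow_hash % count : 0;
 *	}
 *
 * bond_xmit_hash() supplies the flow hash from the packet headers; when
 * count is zero no usable slave exists and the transmit path keeps
 * tx_slave == NULL and lets the caller deal with it.
 */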
/* * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <linux/export.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <drm/drm_plane.h> #include <drm/drm_drv.h> #include <drm/drm_print.h> #include <drm/drm_framebuffer.h> #include <drm/drm_file.h> #include <drm/drm_crtc.h> #include <drm/drm_fourcc.h> #include <drm/drm_managed.h> #include <drm/drm_vblank.h> #include "drm_crtc_internal.h" /** * DOC: overview * * A plane represents an image source that can be blended with or overlaid on * top of a CRTC during the scanout process. Planes take their input data from a * &drm_framebuffer object. The plane itself specifies the cropping and scaling * of that image, and where it is placed on the visible area of a display * pipeline, represented by &drm_crtc.
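 *
 * For example (illustrative values only): to scan out a 1920x1080
 * &drm_framebuffer one-to-one at the top-left corner of the CRTC, an
 * atomic commit would set the source rectangle in 16.16 fixed point and
 * the CRTC rectangle in whole pixels:
 *
 *	SRC_X = 0, SRC_Y = 0, SRC_W = 1920 << 16, SRC_H = 1080 << 16
 *	CRTC_X = 0, CRTC_Y = 0, CRTC_W = 1920, CRTC_H = 1080
 *
 * Scaling is simply a mismatch between the two rectangles, and cropping
 * is a source rectangle smaller than the framebuffer.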
A plane can also have additional * properties that specify how the pixels are positioned and blended, like * rotation or Z-position. All these properties are stored in &drm_plane_state. * * Unless explicitly specified (via CRTC property or otherwise), the active area * of a CRTC will be black by default. This means portions of the active area * which are not covered by a plane will be black, and alpha blending of any * planes with the CRTC background will blend with black at the lowest zpos. * * To create a plane, a KMS drivers allocates and zeroes an instances of * &struct drm_plane (possibly as part of a larger structure) and registers it * with a call to drm_universal_plane_init(). * * Each plane has a type, see enum drm_plane_type. A plane can be compatible * with multiple CRTCs, see &drm_plane.possible_crtcs. * * Each CRTC must have a unique primary plane userspace can attach to enable * the CRTC. In other words, userspace must be able to attach a different * primary plane to each CRTC at the same time. Primary planes can still be * compatible with multiple CRTCs. There must be exactly as many primary planes * as there are CRTCs. * * Legacy uAPI doesn't expose the primary and cursor planes directly. DRM core * relies on the driver to set the primary and optionally the cursor plane used * for legacy IOCTLs. This is done by calling drm_crtc_init_with_planes(). All * drivers must provide one primary plane per CRTC to avoid surprising legacy * userspace too much. */ /** * DOC: standard plane properties * * DRM planes have a few standardized properties: * * type: * Immutable property describing the type of the plane. * * For user-space which has enabled the &DRM_CLIENT_CAP_ATOMIC capability, * the plane type is just a hint and is mostly superseded by atomic * test-only commits. The type hint can still be used to come up more * easily with a plane configuration accepted by the driver. * * The value of this property can be one of the following: * * "Primary": * To light up a CRTC, attaching a primary plane is the most likely to * work if it covers the whole CRTC and doesn't have scaling or * cropping set up. * * Drivers may support more features for the primary plane, user-space * can find out with test-only atomic commits. * * Some primary planes are implicitly used by the kernel in the legacy * IOCTLs &DRM_IOCTL_MODE_SETCRTC and &DRM_IOCTL_MODE_PAGE_FLIP. * Therefore user-space must not mix explicit usage of any primary * plane (e.g. through an atomic commit) with these legacy IOCTLs. * * "Cursor": * To enable this plane, using a framebuffer configured without scaling * or cropping and with the following properties is the most likely to * work: * * - If the driver provides the capabilities &DRM_CAP_CURSOR_WIDTH and * &DRM_CAP_CURSOR_HEIGHT, create the framebuffer with this size. * Otherwise, create a framebuffer with the size 64x64. * - If the driver doesn't support modifiers, create a framebuffer with * a linear layout. Otherwise, use the IN_FORMATS plane property. * * Drivers may support more features for the cursor plane, user-space * can find out with test-only atomic commits. * * Some cursor planes are implicitly used by the kernel in the legacy * IOCTLs &DRM_IOCTL_MODE_CURSOR and &DRM_IOCTL_MODE_CURSOR2. * Therefore user-space must not mix explicit usage of any cursor * plane (e.g. through an atomic commit) with these legacy IOCTLs. * * Some drivers may support cursors even if no cursor plane is exposed. * In this case, the legacy cursor IOCTLs can be used to configure the * cursor. 
* * "Overlay": * Neither primary nor cursor. * * Overlay planes are the only planes exposed when the * &DRM_CLIENT_CAP_UNIVERSAL_PLANES capability is disabled. * * IN_FORMATS: * Blob property which contains the set of buffer format and modifier * pairs supported by this plane. The blob is a struct * drm_format_modifier_blob. Without this property the plane doesn't * support buffers with modifiers. Userspace cannot change this property. * * Note that userspace can check the &DRM_CAP_ADDFB2_MODIFIERS driver * capability for general modifier support. If this flag is set then every * plane will have the IN_FORMATS property, even when it only supports * DRM_FORMAT_MOD_LINEAR. Before linux kernel release v5.1 there have been * various bugs in this area with inconsistencies between the capability * flag and per-plane properties. * * IN_FORMATS_ASYNC: * Blob property which contains the set of buffer format and modifier * pairs supported by this plane for asynchronous flips. The blob is a struct * drm_format_modifier_blob. Userspace cannot change this property. This is an * optional property and if not present then user should expect a failure in * atomic ioctl when the modifier/format is not supported by that plane under * asynchronous flip. * * SIZE_HINTS: * Blob property which contains the set of recommended plane size * which can used for simple "cursor like" use cases (eg. no scaling). * Using these hints frees userspace from extensive probing of * supported plane sizes through atomic/setcursor ioctls. * * The blob contains an array of struct drm_plane_size_hint, in * order of preference. For optimal usage userspace should pick * the first size that satisfies its own requirements. * * Drivers should only attach this property to planes that * support a very limited set of sizes. * * Note that property value 0 (ie. no blob) is reserved for potential * future use. Current userspace is expected to ignore the property * if the value is 0, and fall back to some other means (eg. * &DRM_CAP_CURSOR_WIDTH and &DRM_CAP_CURSOR_HEIGHT) to determine * the appropriate plane size to use. */ static unsigned int drm_num_planes(struct drm_device *dev) { unsigned int num = 0; struct drm_plane *tmp; drm_for_each_plane(tmp, dev) { num++; } return num; } static inline u32 * formats_ptr(struct drm_format_modifier_blob *blob) { return (u32 *)(((char *)blob) + blob->formats_offset); } static inline struct drm_format_modifier * modifiers_ptr(struct drm_format_modifier_blob *blob) { return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset); } static struct drm_property_blob *create_in_format_blob(struct drm_device *dev, struct drm_plane *plane, bool (*format_mod_supported) (struct drm_plane *plane, u32 format, u64 modifier)) { struct drm_property_blob *blob; struct drm_format_modifier *mod; size_t blob_size, formats_size, modifiers_size; struct drm_format_modifier_blob *blob_data; unsigned int i, j; formats_size = sizeof(__u32) * plane->format_count; if (WARN_ON(!formats_size)) { /* 0 formats are never expected */ return ERR_PTR(-EINVAL); } modifiers_size = sizeof(struct drm_format_modifier) * plane->modifier_count; blob_size = sizeof(struct drm_format_modifier_blob); /* Modifiers offset is a pointer to a struct with a 64 bit field so it * should be naturally aligned to 8B. 
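 *
 * Worked example (illustrative): with three formats, formats_size is
 * 3 * sizeof(__u32) = 12, so
 *
 *	modifiers_offset = ALIGN(formats_offset + 12, 8)
 *
 * which rounds the modifier array up to the next 8-byte boundary and
 * keeps the 64-bit fields of struct drm_format_modifier aligned.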
*/ BUILD_BUG_ON(sizeof(struct drm_format_modifier_blob) % 8); blob_size += ALIGN(formats_size, 8); blob_size += modifiers_size; blob = drm_property_create_blob(dev, blob_size, NULL); if (IS_ERR(blob)) return blob; blob_data = blob->data; blob_data->version = FORMAT_BLOB_CURRENT; blob_data->count_formats = plane->format_count; blob_data->formats_offset = sizeof(struct drm_format_modifier_blob); blob_data->count_modifiers = plane->modifier_count; blob_data->modifiers_offset = ALIGN(blob_data->formats_offset + formats_size, 8); memcpy(formats_ptr(blob_data), plane->format_types, formats_size); mod = modifiers_ptr(blob_data); for (i = 0; i < plane->modifier_count; i++) { for (j = 0; j < plane->format_count; j++) { if (!format_mod_supported || format_mod_supported(plane, plane->format_types[j], plane->modifiers[i])) { mod->formats |= 1ULL << j; } } mod->modifier = plane->modifiers[i]; mod->offset = 0; mod->pad = 0; mod++; } return blob; } /** * DOC: hotspot properties * * HOTSPOT_X: property to set mouse hotspot x offset. * HOTSPOT_Y: property to set mouse hotspot y offset. * * When the plane is being used as a cursor image to display a mouse pointer, * the "hotspot" is the offset within the cursor image where mouse events * are expected to go. * * Positive values move the hotspot from the top-left corner of the cursor * plane towards the right and bottom. * * Most display drivers do not need this information because the * hotspot is not actually connected to anything visible on screen. * However, this is necessary for display drivers like the para-virtualized * drivers (eg qxl, vbox, virtio, vmwgfx), that are attached to a user console * with a mouse pointer. Since these consoles are often being remoted over a * network, they would otherwise have to wait to display the pointer movement to * the user until a full network round-trip has occurred. New mouse events have * to be sent from the user's console, over the network to the virtual input * devices, forwarded to the desktop for processing, and then the cursor plane's * position can be updated and sent back to the user's console over the network. * Instead, with the hotspot information, the console can anticipate the new * location, and draw the mouse cursor there before the confirmation comes in. * To do that correctly, the user's console must be able predict how the * desktop will process mouse events, which normally requires the desktop's * mouse topology information, ie where each CRTC sits in the mouse coordinate * space. This is typically sent to the para-virtualized drivers using some * driver-specific method, and the driver then forwards it to the console by * way of the virtual display device or hypervisor. * * The assumption is generally made that there is only one cursor plane being * used this way at a time, and that the desktop is feeding all mouse devices * into the same global pointer. Para-virtualized drivers that require this * should only be exposing a single cursor plane, or find some other way * to coordinate with a userspace desktop that supports multiple pointers. * If the hotspot properties are set, the cursor plane is therefore assumed to be * used only for displaying a mouse cursor image, and the position of the combined * cursor plane + offset can therefore be used for coordinating with input from a * mouse device. * * The cursor will then be drawn either at the location of the plane in the CRTC * console, or as a free-floating cursor plane on the user's console * corresponding to their desktop mouse position. 
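 *
 * A minimal sketch of how a compositor might use these properties
 * (illustrative only; prop_x/prop_y stand for the property ids the
 * client has already looked up for HOTSPOT_X/HOTSPOT_Y):
 *
 *	drmSetClientCap(fd, DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT, 1);
 *	...
 *	drmModeAtomicAddProperty(req, cursor_plane_id, prop_x, hot_x);
 *	drmModeAtomicAddProperty(req, cursor_plane_id, prop_y, hot_y);
 *	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);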
* * DRM clients which would like to work correctly on drivers which expose * hotspot properties should advertise DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT. * Setting this property on drivers which do not special case * cursor planes will return EOPNOTSUPP, which can be used by userspace to * gauge requirements of the hardware/drivers they're running on. Advertising * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT implies that the userspace client will be * correctly setting the hotspot properties. */ /** * drm_plane_create_hotspot_properties - creates the mouse hotspot * properties and attaches them to the given cursor plane * * @plane: drm cursor plane * * This function enables the mouse hotspot property on a given * cursor plane. Look at the documentation for hotspot properties * to get a better understanding for what they're used for. * * RETURNS: * Zero for success or -errno */ static int drm_plane_create_hotspot_properties(struct drm_plane *plane) { struct drm_property *prop_x; struct drm_property *prop_y; drm_WARN_ON(plane->dev, !drm_core_check_feature(plane->dev, DRIVER_CURSOR_HOTSPOT)); prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X", INT_MIN, INT_MAX); if (!prop_x) return -ENOMEM; prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y", INT_MIN, INT_MAX); if (!prop_y) { drm_property_destroy(plane->dev, prop_x); return -ENOMEM; } drm_object_attach_property(&plane->base, prop_x, 0); drm_object_attach_property(&plane->base, prop_y, 0); plane->hotspot_x_property = prop_x; plane->hotspot_y_property = prop_y; return 0; } __printf(9, 0) static int __drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, const uint64_t *format_modifiers, enum drm_plane_type type, const char *name, va_list ap) { struct drm_mode_config *config = &dev->mode_config; struct drm_property_blob *blob; static const uint64_t default_modifiers[] = { DRM_FORMAT_MOD_LINEAR, }; unsigned int format_modifier_count = 0; int ret; /* plane index is used with 32bit bitmasks */ if (WARN_ON(config->num_total_plane >= 32)) return -EINVAL; /* * First driver to need more than 64 formats needs to fix this. Each * format is encoded as a bit and the current code only supports a u64. 
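 *
 * In other words, in create_in_format_blob() above a supported format
 * index j is recorded per modifier as
 *
 *	mod->formats |= 1ULL << j;
 *
 * so a format index of 64 or above cannot be represented in the mask,
 * hence the WARN_ON below.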
*/ if (WARN_ON(format_count > 64)) return -EINVAL; WARN_ON(drm_drv_uses_atomic_modeset(dev) && (!funcs->atomic_destroy_state || !funcs->atomic_duplicate_state)); ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE); if (ret) return ret; drm_modeset_lock_init(&plane->mutex); plane->base.properties = &plane->properties; plane->dev = dev; plane->funcs = funcs; plane->format_types = kmalloc_array(format_count, sizeof(uint32_t), GFP_KERNEL); if (!plane->format_types) { DRM_DEBUG_KMS("out of memory when allocating plane\n"); drm_mode_object_unregister(dev, &plane->base); return -ENOMEM; } if (format_modifiers) { const uint64_t *temp_modifiers = format_modifiers; while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID) format_modifier_count++; } else { if (!dev->mode_config.fb_modifiers_not_supported) { format_modifiers = default_modifiers; format_modifier_count = ARRAY_SIZE(default_modifiers); } } /* autoset the cap and check for consistency across all planes */ drm_WARN_ON(dev, config->fb_modifiers_not_supported && format_modifier_count); plane->modifier_count = format_modifier_count; plane->modifiers = kmalloc_array(format_modifier_count, sizeof(*plane->modifiers), GFP_KERNEL); if (format_modifier_count && !plane->modifiers) { DRM_DEBUG_KMS("out of memory when allocating plane\n"); kfree(plane->format_types); drm_mode_object_unregister(dev, &plane->base); return -ENOMEM; } if (name) { plane->name = kvasprintf(GFP_KERNEL, name, ap); } else { plane->name = kasprintf(GFP_KERNEL, "plane-%d", drm_num_planes(dev)); } if (!plane->name) { kfree(plane->format_types); kfree(plane->modifiers); drm_mode_object_unregister(dev, &plane->base); return -ENOMEM; } memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); plane->format_count = format_count; memcpy(plane->modifiers, format_modifiers, format_modifier_count * sizeof(format_modifiers[0])); plane->possible_crtcs = possible_crtcs; plane->type = type; list_add_tail(&plane->head, &config->plane_list); plane->index = config->num_total_plane++; drm_object_attach_property(&plane->base, config->plane_type_property, plane->type); if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { drm_object_attach_property(&plane->base, config->prop_fb_id, 0); drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1); drm_object_attach_property(&plane->base, config->prop_crtc_id, 0); drm_object_attach_property(&plane->base, config->prop_crtc_x, 0); drm_object_attach_property(&plane->base, config->prop_crtc_y, 0); drm_object_attach_property(&plane->base, config->prop_crtc_w, 0); drm_object_attach_property(&plane->base, config->prop_crtc_h, 0); drm_object_attach_property(&plane->base, config->prop_src_x, 0); drm_object_attach_property(&plane->base, config->prop_src_y, 0); drm_object_attach_property(&plane->base, config->prop_src_w, 0); drm_object_attach_property(&plane->base, config->prop_src_h, 0); } if (drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) && type == DRM_PLANE_TYPE_CURSOR) { drm_plane_create_hotspot_properties(plane); } if (format_modifier_count) { blob = create_in_format_blob(dev, plane, plane->funcs->format_mod_supported); if (!IS_ERR(blob)) drm_object_attach_property(&plane->base, config->modifiers_property, blob->base.id); } if (plane->funcs->format_mod_supported_async) { blob = create_in_format_blob(dev, plane, plane->funcs->format_mod_supported_async); if (!IS_ERR(blob)) drm_object_attach_property(&plane->base, config->async_modifiers_property, blob->base.id); } return 0; } /** * drm_universal_plane_init - Initialize a 
new universal plane object * @dev: DRM device * @plane: plane object to init * @possible_crtcs: bitmask of possible CRTCs * @funcs: callbacks for the new plane * @formats: array of supported formats (DRM_FORMAT\_\*) * @format_count: number of elements in @formats * @format_modifiers: array of struct drm_format modifiers terminated by * DRM_FORMAT_MOD_INVALID * @type: type of plane (overlay, primary, cursor) * @name: printf style format string for the plane name, or NULL for default name * * Initializes a plane object of type @type. The &drm_plane_funcs.destroy hook * should call drm_plane_cleanup() and kfree() the plane structure. The plane * structure should not be allocated with devm_kzalloc(). * * Note: consider using drmm_universal_plane_alloc() instead of * drm_universal_plane_init() to let the DRM managed resource infrastructure * take care of cleanup and deallocation. * * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier support may set * @format_modifiers to NULL. The plane will advertise the linear modifier. * * Returns: * Zero on success, error code on failure. */ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, const uint64_t *format_modifiers, enum drm_plane_type type, const char *name, ...) { va_list ap; int ret; WARN_ON(!funcs->destroy); va_start(ap, name); ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ap); va_end(ap); return ret; } EXPORT_SYMBOL(drm_universal_plane_init); static void drmm_universal_plane_alloc_release(struct drm_device *dev, void *ptr) { struct drm_plane *plane = ptr; if (WARN_ON(!plane->dev)) return; drm_plane_cleanup(plane); } void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size, size_t offset, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, const uint64_t *format_modifiers, enum drm_plane_type type, const char *name, ...) { void *container; struct drm_plane *plane; va_list ap; int ret; if (WARN_ON(!funcs || funcs->destroy)) return ERR_PTR(-EINVAL); container = drmm_kzalloc(dev, size, GFP_KERNEL); if (!container) return ERR_PTR(-ENOMEM); plane = container + offset; va_start(ap, name); ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ap); va_end(ap); if (ret) return ERR_PTR(ret); ret = drmm_add_action_or_reset(dev, drmm_universal_plane_alloc_release, plane); if (ret) return ERR_PTR(ret); return container; } EXPORT_SYMBOL(__drmm_universal_plane_alloc); void *__drm_universal_plane_alloc(struct drm_device *dev, size_t size, size_t offset, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, const uint64_t *format_modifiers, enum drm_plane_type type, const char *name, ...) 
{ void *container; struct drm_plane *plane; va_list ap; int ret; if (drm_WARN_ON(dev, !funcs)) return ERR_PTR(-EINVAL); container = kzalloc(size, GFP_KERNEL); if (!container) return ERR_PTR(-ENOMEM); plane = container + offset; va_start(ap, name); ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ap); va_end(ap); if (ret) goto err_kfree; return container; err_kfree: kfree(container); return ERR_PTR(ret); } EXPORT_SYMBOL(__drm_universal_plane_alloc); int drm_plane_register_all(struct drm_device *dev) { unsigned int num_planes = 0; unsigned int num_zpos = 0; struct drm_plane *plane; int ret = 0; drm_for_each_plane(plane, dev) { if (plane->funcs->late_register) ret = plane->funcs->late_register(plane); if (ret) return ret; if (plane->zpos_property) num_zpos++; num_planes++; } drm_WARN(dev, num_zpos && num_planes != num_zpos, "Mixing planes with and without zpos property is invalid\n"); return 0; } void drm_plane_unregister_all(struct drm_device *dev) { struct drm_plane *plane; drm_for_each_plane(plane, dev) { if (plane->funcs->early_unregister) plane->funcs->early_unregister(plane); } } /** * drm_plane_cleanup - Clean up the core plane usage * @plane: plane to cleanup * * This function cleans up @plane and removes it from the DRM mode setting * core. Note that the function does *not* free the plane structure itself, * this is the responsibility of the caller. */ void drm_plane_cleanup(struct drm_plane *plane) { struct drm_device *dev = plane->dev; drm_modeset_lock_fini(&plane->mutex); kfree(plane->format_types); kfree(plane->modifiers); drm_mode_object_unregister(dev, &plane->base); BUG_ON(list_empty(&plane->head)); /* Note that the plane_list is considered to be static; should we * remove the drm_plane at runtime we would have to decrement all * the indices on the drm_plane after us in the plane_list. */ list_del(&plane->head); dev->mode_config.num_total_plane--; WARN_ON(plane->state && !plane->funcs->atomic_destroy_state); if (plane->state && plane->funcs->atomic_destroy_state) plane->funcs->atomic_destroy_state(plane, plane->state); kfree(plane->name); memset(plane, 0, sizeof(*plane)); } EXPORT_SYMBOL(drm_plane_cleanup); /** * drm_plane_from_index - find the registered plane at an index * @dev: DRM device * @idx: index of registered plane to find for * * Given a plane index, return the registered plane from DRM device's * list of planes with matching index. This is the inverse of drm_plane_index(). */ struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx) { struct drm_plane *plane; drm_for_each_plane(plane, dev) if (idx == plane->index) return plane; return NULL; } EXPORT_SYMBOL(drm_plane_from_index); /** * drm_plane_force_disable - Forcibly disable a plane * @plane: plane to disable * * Forces the plane to be disabled. * * Used when the plane's current framebuffer is destroyed, * and when restoring fbdev mode. * * Note that this function is not suitable for atomic drivers, since it doesn't * wire through the lock acquisition context properly and hence can't handle * retries or driver private locks. You probably want to use * drm_atomic_helper_disable_plane() or * drm_atomic_helper_disable_planes_on_crtc() instead. 
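 *
 * For atomic drivers a plane disable looks roughly like this instead
 * (a sketch, assuming no acquire context is held yet):
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx, 0, ret);
 *	ret = drm_atomic_helper_disable_plane(plane, &ctx);
 *	DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret);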
*/ void drm_plane_force_disable(struct drm_plane *plane) { int ret; if (!plane->fb) return; WARN_ON(drm_drv_uses_atomic_modeset(plane->dev)); plane->old_fb = plane->fb; ret = plane->funcs->disable_plane(plane, NULL); if (ret) { DRM_ERROR("failed to disable plane with busy fb\n"); plane->old_fb = NULL; return; } /* disconnect the plane from the fb and crtc: */ drm_framebuffer_put(plane->old_fb); plane->old_fb = NULL; plane->fb = NULL; plane->crtc = NULL; } EXPORT_SYMBOL(drm_plane_force_disable); /** * drm_mode_plane_set_obj_prop - set the value of a property * @plane: drm plane object to set property value for * @property: property to set * @value: value the property should be set to * * This functions sets a given property on a given plane object. This function * calls the driver's ->set_property callback and changes the software state of * the property if the callback succeeds. * * Returns: * Zero on success, error code on failure. */ int drm_mode_plane_set_obj_prop(struct drm_plane *plane, struct drm_property *property, uint64_t value) { int ret = -EINVAL; struct drm_mode_object *obj = &plane->base; if (plane->funcs->set_property) ret = plane->funcs->set_property(plane, property, value); if (!ret) drm_object_property_set_value(obj, property, value); return ret; } EXPORT_SYMBOL(drm_mode_plane_set_obj_prop); int drm_mode_getplane_res(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_plane_res *plane_resp = data; struct drm_plane *plane; uint32_t __user *plane_ptr; int count = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; plane_ptr = u64_to_user_ptr(plane_resp->plane_id_ptr); /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. */ drm_for_each_plane(plane, dev) { /* * Unless userspace set the 'universal planes' * capability bit, only advertise overlays. */ if (plane->type != DRM_PLANE_TYPE_OVERLAY && !file_priv->universal_planes) continue; /* * If we're running on a virtualized driver then, * unless userspace advertizes support for the * virtualized cursor plane, disable cursor planes * because they'll be broken due to missing cursor * hotspot info. 
*/ if (plane->type == DRM_PLANE_TYPE_CURSOR && drm_core_check_feature(dev, DRIVER_CURSOR_HOTSPOT) && file_priv->atomic && !file_priv->supports_virtualized_cursor_plane) continue; if (drm_lease_held(file_priv, plane->base.id)) { if (count < plane_resp->count_planes && put_user(plane->base.id, plane_ptr + count)) return -EFAULT; count++; } } plane_resp->count_planes = count; return 0; } int drm_mode_getplane(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_plane *plane_resp = data; struct drm_plane *plane; uint32_t __user *format_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; plane = drm_plane_find(dev, file_priv, plane_resp->plane_id); if (!plane) return -ENOENT; drm_modeset_lock(&plane->mutex, NULL); if (plane->state && plane->state->crtc && drm_lease_held(file_priv, plane->state->crtc->base.id)) plane_resp->crtc_id = plane->state->crtc->base.id; else if (!plane->state && plane->crtc && drm_lease_held(file_priv, plane->crtc->base.id)) plane_resp->crtc_id = plane->crtc->base.id; else plane_resp->crtc_id = 0; if (plane->state && plane->state->fb) plane_resp->fb_id = plane->state->fb->base.id; else if (!plane->state && plane->fb) plane_resp->fb_id = plane->fb->base.id; else plane_resp->fb_id = 0; drm_modeset_unlock(&plane->mutex); plane_resp->plane_id = plane->base.id; plane_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv, plane->possible_crtcs); plane_resp->gamma_size = 0; /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. */ if (plane->format_count && (plane_resp->count_format_types >= plane->format_count)) { format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr; if (copy_to_user(format_ptr, plane->format_types, sizeof(uint32_t) * plane->format_count)) { return -EFAULT; } } plane_resp->count_format_types = plane->format_count; return 0; } /** * drm_plane_has_format - Check whether the plane supports this format and modifier combination * @plane: drm plane * @format: pixel format (DRM_FORMAT_*) * @modifier: data layout modifier * * Returns: * Whether the plane supports the specified format and modifier combination. */ bool drm_plane_has_format(struct drm_plane *plane, u32 format, u64 modifier) { unsigned int i; for (i = 0; i < plane->format_count; i++) { if (format == plane->format_types[i]) break; } if (i == plane->format_count) return false; if (plane->funcs->format_mod_supported) { if (!plane->funcs->format_mod_supported(plane, format, modifier)) return false; } else { if (!plane->modifier_count) return true; for (i = 0; i < plane->modifier_count; i++) { if (modifier == plane->modifiers[i]) break; } if (i == plane->modifier_count) return false; } return true; } EXPORT_SYMBOL(drm_plane_has_format); static int __setplane_check(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int32_t crtc_x, int32_t crtc_y, uint32_t crtc_w, uint32_t crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { int ret; /* Check whether this plane is usable on this CRTC */ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { DRM_DEBUG_KMS("Invalid crtc for plane\n"); return -EINVAL; } /* Check whether this plane supports the fb pixel format. 
*/ if (!drm_plane_has_format(plane, fb->format->format, fb->modifier)) { DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n", &fb->format->format, fb->modifier); return -EINVAL; } /* Give drivers some help against integer overflows */ if (crtc_w > INT_MAX || crtc_x > INT_MAX - (int32_t) crtc_w || crtc_h > INT_MAX || crtc_y > INT_MAX - (int32_t) crtc_h) { DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", crtc_w, crtc_h, crtc_x, crtc_y); return -ERANGE; } ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb); if (ret) return ret; return 0; } /** * drm_any_plane_has_format - Check whether any plane supports this format and modifier combination * @dev: DRM device * @format: pixel format (DRM_FORMAT_*) * @modifier: data layout modifier * * Returns: * Whether at least one plane supports the specified format and modifier combination. */ bool drm_any_plane_has_format(struct drm_device *dev, u32 format, u64 modifier) { struct drm_plane *plane; drm_for_each_plane(plane, dev) { if (drm_plane_has_format(plane, format, modifier)) return true; } return false; } EXPORT_SYMBOL(drm_any_plane_has_format); /* * __setplane_internal - setplane handler for internal callers * * This function will take a reference on the new fb for the plane * on success. * * src_{x,y,w,h} are provided in 16.16 fixed point format */ static int __setplane_internal(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int32_t crtc_x, int32_t crtc_y, uint32_t crtc_w, uint32_t crtc_h, /* src_{x,y,w,h} values are 16.16 fixed point */ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, struct drm_modeset_acquire_ctx *ctx) { int ret = 0; WARN_ON(drm_drv_uses_atomic_modeset(plane->dev)); /* No fb means shut it down */ if (!fb) { plane->old_fb = plane->fb; ret = plane->funcs->disable_plane(plane, ctx); if (!ret) { plane->crtc = NULL; plane->fb = NULL; } else { plane->old_fb = NULL; } goto out; } ret = __setplane_check(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); if (ret) goto out; plane->old_fb = plane->fb; ret = plane->funcs->update_plane(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, ctx); if (!ret) { plane->crtc = crtc; plane->fb = fb; drm_framebuffer_get(plane->fb); } else { plane->old_fb = NULL; } out: if (plane->old_fb) drm_framebuffer_put(plane->old_fb); plane->old_fb = NULL; return ret; } static int __setplane_atomic(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int32_t crtc_x, int32_t crtc_y, uint32_t crtc_w, uint32_t crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, struct drm_modeset_acquire_ctx *ctx) { int ret; WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev)); /* No fb means shut it down */ if (!fb) return plane->funcs->disable_plane(plane, ctx); /* * FIXME: This is redundant with drm_atomic_plane_check(), * but the legacy cursor/"async" .update_plane() tricks * don't call that so we still need this here. Should remove * this when all .update_plane() implementations have been * fixed to call drm_atomic_plane_check(). 
*/ ret = __setplane_check(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); if (ret) return ret; return plane->funcs->update_plane(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, ctx); } static int setplane_internal(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int32_t crtc_x, int32_t crtc_y, uint32_t crtc_w, uint32_t crtc_h, /* src_{x,y,w,h} values are 16.16 fixed point */ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { struct drm_modeset_acquire_ctx ctx; int ret; DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); if (drm_drv_uses_atomic_modeset(plane->dev)) ret = __setplane_atomic(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, &ctx); else ret = __setplane_internal(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, &ctx); DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret); return ret; } int drm_mode_setplane(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_set_plane *plane_req = data; struct drm_plane *plane; struct drm_crtc *crtc = NULL; struct drm_framebuffer *fb = NULL; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; /* * First, find the plane, crtc, and fb objects. If not available, * we don't bother to call the driver. */ plane = drm_plane_find(dev, file_priv, plane_req->plane_id); if (!plane) { DRM_DEBUG_KMS("Unknown plane ID %d\n", plane_req->plane_id); return -ENOENT; } if (plane_req->fb_id) { fb = drm_framebuffer_lookup(dev, file_priv, plane_req->fb_id); if (!fb) { DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", plane_req->fb_id); return -ENOENT; } crtc = drm_crtc_find(dev, file_priv, plane_req->crtc_id); if (!crtc) { drm_framebuffer_put(fb); DRM_DEBUG_KMS("Unknown crtc ID %d\n", plane_req->crtc_id); return -ENOENT; } } ret = setplane_internal(plane, crtc, fb, plane_req->crtc_x, plane_req->crtc_y, plane_req->crtc_w, plane_req->crtc_h, plane_req->src_x, plane_req->src_y, plane_req->src_w, plane_req->src_h); if (fb) drm_framebuffer_put(fb); return ret; } static int drm_mode_cursor_universal(struct drm_crtc *crtc, struct drm_mode_cursor2 *req, struct drm_file *file_priv, struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev = crtc->dev; struct drm_plane *plane = crtc->cursor; struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd2 fbreq = { .width = req->width, .height = req->height, .pixel_format = DRM_FORMAT_ARGB8888, .pitches = { req->width * 4 }, .handles = { req->handle }, }; int32_t crtc_x, crtc_y; uint32_t crtc_w = 0, crtc_h = 0; uint32_t src_w = 0, src_h = 0; int ret = 0; BUG_ON(!plane); WARN_ON(plane->crtc != crtc && plane->crtc != NULL); /* * Obtain fb we'll be using (either new or existing) and take an extra * reference to it if fb != null. setplane will take care of dropping * the reference if the plane update fails. 
*/ if (req->flags & DRM_MODE_CURSOR_BO) { if (req->handle) { fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } if (plane->hotspot_x_property && plane->state) plane->state->hotspot_x = req->hot_x; if (plane->hotspot_y_property && plane->state) plane->state->hotspot_y = req->hot_y; } else { fb = NULL; } } else { if (plane->state) fb = plane->state->fb; else fb = plane->fb; if (fb) drm_framebuffer_get(fb); } if (req->flags & DRM_MODE_CURSOR_MOVE) { crtc_x = req->x; crtc_y = req->y; } else { crtc_x = crtc->cursor_x; crtc_y = crtc->cursor_y; } if (fb) { crtc_w = fb->width; crtc_h = fb->height; src_w = fb->width << 16; src_h = fb->height << 16; } if (drm_drv_uses_atomic_modeset(dev)) ret = __setplane_atomic(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, 0, 0, src_w, src_h, ctx); else ret = __setplane_internal(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, 0, 0, src_w, src_h, ctx); if (fb) drm_framebuffer_put(fb); /* Update successful; save new cursor position, if necessary */ if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) { crtc->cursor_x = req->x; crtc->cursor_y = req->y; } return ret; } static int drm_mode_cursor_common(struct drm_device *dev, struct drm_mode_cursor2 *req, struct drm_file *file_priv) { struct drm_crtc *crtc; struct drm_modeset_acquire_ctx ctx; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) return -EINVAL; crtc = drm_crtc_find(dev, file_priv, req->crtc_id); if (!crtc) { DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); return -ENOENT; } drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); retry: ret = drm_modeset_lock(&crtc->mutex, &ctx); if (ret) goto out; /* * If this crtc has a universal cursor plane, call that plane's update * handler rather than using legacy cursor handlers. */ if (crtc->cursor) { ret = drm_modeset_lock(&crtc->cursor->mutex, &ctx); if (ret) goto out; if (!drm_lease_held(file_priv, crtc->cursor->base.id)) { ret = -EACCES; goto out; } ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx); goto out; } if (req->flags & DRM_MODE_CURSOR_BO) { if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { ret = -ENXIO; goto out; } /* Turns off the cursor if handle is 0 */ if (crtc->funcs->cursor_set2) ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle, req->width, req->height, req->hot_x, req->hot_y); else ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle, req->width, req->height); } if (req->flags & DRM_MODE_CURSOR_MOVE) { if (crtc->funcs->cursor_move) { ret = crtc->funcs->cursor_move(crtc, req->x, req->y); } else { ret = -EFAULT; goto out; } } out: if (ret == -EDEADLK) { ret = drm_modeset_backoff(&ctx); if (!ret) goto retry; } drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); return ret; } int drm_mode_cursor_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_cursor *req = data; struct drm_mode_cursor2 new_req; memcpy(&new_req, req, sizeof(struct drm_mode_cursor)); new_req.hot_x = new_req.hot_y = 0; return drm_mode_cursor_common(dev, &new_req, file_priv); } /* * Set the cursor configuration based on user request. This implements the 2nd * version of the cursor ioctl, which allows userspace to additionally specify * the hotspot of the pointer. 
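 *
 * From userspace this is typically reached through libdrm, e.g. (sketch,
 * with a 64x64 cursor buffer object):
 *
 *	drmModeSetCursor2(fd, crtc_id, bo_handle, 64, 64, hot_x, hot_y);
 *
 * Passing a bo_handle of 0 turns the cursor off again.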
*/ int drm_mode_cursor2_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_cursor2 *req = data; return drm_mode_cursor_common(dev, req, file_priv); } int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_page_flip_target *page_flip = data; struct drm_crtc *crtc; struct drm_plane *plane; struct drm_framebuffer *fb = NULL, *old_fb; struct drm_pending_vblank_event *e = NULL; u32 target_vblank = page_flip->sequence; struct drm_modeset_acquire_ctx ctx; int ret = -EINVAL; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS) return -EINVAL; if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) return -EINVAL; /* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags * can be specified */ if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET) return -EINVAL; if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip) return -EINVAL; crtc = drm_crtc_find(dev, file_priv, page_flip->crtc_id); if (!crtc) return -ENOENT; plane = crtc->primary; if (!drm_lease_held(file_priv, plane->base.id)) return -EACCES; if (crtc->funcs->page_flip_target) { u32 current_vblank; int r; r = drm_crtc_vblank_get(crtc); if (r) return r; current_vblank = (u32)drm_crtc_vblank_count(crtc); switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) { case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE: if ((int)(target_vblank - current_vblank) > 1) { DRM_DEBUG("Invalid absolute flip target %u, " "must be <= %u\n", target_vblank, current_vblank + 1); drm_crtc_vblank_put(crtc); return -EINVAL; } break; case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE: if (target_vblank != 0 && target_vblank != 1) { DRM_DEBUG("Invalid relative flip target %u, " "must be 0 or 1\n", target_vblank); drm_crtc_vblank_put(crtc); return -EINVAL; } target_vblank += current_vblank; break; default: target_vblank = current_vblank + !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC); break; } } else if (crtc->funcs->page_flip == NULL || (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) { return -EINVAL; } drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); retry: ret = drm_modeset_lock(&crtc->mutex, &ctx); if (ret) goto out; ret = drm_modeset_lock(&plane->mutex, &ctx); if (ret) goto out; if (plane->state) old_fb = plane->state->fb; else old_fb = plane->fb; if (old_fb == NULL) { /* The framebuffer is currently unbound, presumably * due to a hotplug event, that userspace has not * yet discovered. */ ret = -EBUSY; goto out; } fb = drm_framebuffer_lookup(dev, file_priv, page_flip->fb_id); if (!fb) { ret = -ENOENT; goto out; } if (plane->state) { const struct drm_plane_state *state = plane->state; ret = drm_framebuffer_check_src_coords(state->src_x, state->src_y, state->src_w, state->src_h, fb); } else { ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb); } if (ret) goto out; /* * Only check the FOURCC format code, excluding modifiers. This is * enough for all legacy drivers. Atomic drivers have their own * checks in their ->atomic_check implementation, which will * return -EINVAL if any hw or driver constraint is violated due * to modifier changes. 
*/ if (old_fb->format->format != fb->format->format) { DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n"); ret = -EINVAL; goto out; } if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { e = kzalloc_obj(*e); if (!e) { ret = -ENOMEM; goto out; } e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof(e->event); e->event.vbl.user_data = page_flip->user_data; e->event.vbl.crtc_id = crtc->base.id; ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); if (ret) { kfree(e); e = NULL; goto out; } } plane->old_fb = plane->fb; if (crtc->funcs->page_flip_target) ret = crtc->funcs->page_flip_target(crtc, fb, e, page_flip->flags, target_vblank, &ctx); else ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags, &ctx); if (ret) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) drm_event_cancel_free(dev, &e->base); /* Keep the old fb, don't unref it. */ plane->old_fb = NULL; } else { if (!plane->state) { plane->fb = fb; drm_framebuffer_get(fb); } } out: if (fb) drm_framebuffer_put(fb); fb = NULL; if (plane->old_fb) drm_framebuffer_put(plane->old_fb); plane->old_fb = NULL; if (ret == -EDEADLK) { ret = drm_modeset_backoff(&ctx); if (!ret) goto retry; } drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); if (ret && crtc->funcs->page_flip_target) drm_crtc_vblank_put(crtc); return ret; } /** * DOC: damage tracking * * FB_DAMAGE_CLIPS is an optional plane property which provides a means to * specify a list of damage rectangles on a plane in framebuffer coordinates of * the framebuffer attached to the plane. In current context damage is the area * of plane framebuffer that has changed since last plane update (also called * page-flip), irrespective of whether currently attached framebuffer is same as * framebuffer attached during last plane update or not. * * FB_DAMAGE_CLIPS is a hint to kernel which could be helpful for some drivers * to optimize internally especially for virtual devices where each framebuffer * change needs to be transmitted over network, usb, etc. * * Since FB_DAMAGE_CLIPS is a hint so it is an optional property. User-space can * ignore damage clips property and in that case driver will do a full plane * update. In case damage clips are provided then it is guaranteed that the area * inside damage clips will be updated to plane. For efficiency driver can do * full update or can update more than specified in damage clips. Since driver * is free to read more, user-space must always render the entire visible * framebuffer. Otherwise there can be corruptions. Also, if a user-space * provides damage clips which doesn't encompass the actual damage to * framebuffer (since last plane update) can result in incorrect rendering. * * FB_DAMAGE_CLIPS is a blob property with the layout of blob data is simply an * array of &drm_mode_rect. Unlike plane &drm_plane_state.src coordinates, * damage clips are not in 16.16 fixed point. Similar to plane src in * framebuffer, damage clips cannot be negative. In damage clip, x1/y1 are * inclusive and x2/y2 are exclusive. While kernel does not error for overlapped * damage clips, it is strongly discouraged. * * Drivers that are interested in damage interface for plane should enable * FB_DAMAGE_CLIPS property by calling drm_plane_enable_fb_damage_clips(). * Drivers implementing damage can use drm_atomic_helper_damage_iter_init() and * drm_atomic_helper_damage_iter_next() helper iterator function to get damage * rectangles clipped to &drm_plane_state.src. 
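 *
 * A typical frame-damage upload loop in a driver's atomic update hook
 * looks roughly like this (sketch):
 *
 *	struct drm_atomic_helper_damage_iter iter;
 *	struct drm_rect clip;
 *
 *	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
 *	drm_atomic_for_each_plane_damage(&iter, &clip)
 *		upload_rect(fb, clip.x1, clip.y1, clip.x2, clip.y2);
 *
 * where upload_rect() stands in for whatever transfer primitive the
 * device provides.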
* * Note that there are two types of damage handling: frame damage and buffer * damage; which one a driver implements depends on its upload target. Drivers * implementing a per-plane or per-CRTC upload target need to handle frame * damage, while drivers implementing a per-buffer upload target need to handle * buffer damage. * * The existing damage helpers only support the frame damage type; there is no * buffer-age support or similar damage accumulation algorithm implemented yet. * * Only drivers handling frame damage can use the mentioned damage helpers to * iterate over the damaged regions. Drivers that handle buffer damage must set * &drm_plane_state.ignore_damage_clips for drm_atomic_helper_damage_iter_init() * to know that damage clips should be ignored and return &drm_plane_state.src * as the damage rectangle, to force a full plane update. * * Drivers with a per-buffer upload target could compare the &drm_plane_state.fb * of the old and new plane states to determine whether the framebuffer attached * to a plane has changed since the last plane update. If &drm_plane_state.fb * has changed, then &drm_plane_state.ignore_damage_clips must be set to true. * * That is because drivers with a per-plane upload target expect the backing * storage buffer to not change for a given plane. If the upload buffer changes * between page flips, the new upload buffer has to be updated as a whole. This * can be improved in the future if support for frame damage is added to the DRM * damage helpers, similarly to how user-space already handles this case, as * explained in the following documents: * * https://registry.khronos.org/EGL/extensions/KHR/EGL_KHR_swap_buffers_with_damage.txt * https://emersion.fr/blog/2019/intro-to-damage-tracking/ */ /** * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property. * @plane: Plane on which to enable damage clips property. * * This function lets a driver enable the damage clips property on a plane. */ void drm_plane_enable_fb_damage_clips(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct drm_mode_config *config = &dev->mode_config; drm_object_attach_property(&plane->base, config->prop_fb_damage_clips, 0); } EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips); /** * drm_plane_get_damage_clips_count - Returns damage clips count. * @state: Plane state. * * Simple helper to get the number of &drm_mode_rect clips set by user-space * during plane update. * * Return: Number of clips in plane fb_damage_clips blob property. */ unsigned int drm_plane_get_damage_clips_count(const struct drm_plane_state *state) { return (state && state->fb_damage_clips) ? state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0; } EXPORT_SYMBOL(drm_plane_get_damage_clips_count); struct drm_mode_rect * __drm_plane_get_damage_clips(const struct drm_plane_state *state) { return (struct drm_mode_rect *)((state && state->fb_damage_clips) ? state->fb_damage_clips->data : NULL); } /** * drm_plane_get_damage_clips - Returns damage clips. * @state: Plane state. * * Note that this function returns the uapi type &drm_mode_rect. Drivers might * want to use the helper functions drm_atomic_helper_damage_iter_init() and * drm_atomic_helper_damage_iter_next() or drm_atomic_helper_damage_merged() if * the driver can only handle a single damage region at most. * * Return: Damage clips in plane fb_damage_clips blob property.
*/ struct drm_mode_rect * drm_plane_get_damage_clips(const struct drm_plane_state *state) { struct drm_device *dev = state->plane->dev; struct drm_mode_config *config = &dev->mode_config; /* check that drm_plane_enable_fb_damage_clips() was called */ if (!drm_mode_obj_find_prop_id(&state->plane->base, config->prop_fb_damage_clips->base.id)) drm_warn_once(dev, "drm_plane_enable_fb_damage_clips() not called\n"); return __drm_plane_get_damage_clips(state); } EXPORT_SYMBOL(drm_plane_get_damage_clips); struct drm_property * drm_create_scaling_filter_prop(struct drm_device *dev, unsigned int supported_filters) { struct drm_property *prop; static const struct drm_prop_enum_list props[] = { { DRM_SCALING_FILTER_DEFAULT, "Default" }, { DRM_SCALING_FILTER_NEAREST_NEIGHBOR, "Nearest Neighbor" }, }; unsigned int valid_mode_mask = BIT(DRM_SCALING_FILTER_DEFAULT) | BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR); int i; if (WARN_ON((supported_filters & ~valid_mode_mask) || ((supported_filters & BIT(DRM_SCALING_FILTER_DEFAULT)) == 0))) return ERR_PTR(-EINVAL); prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, "SCALING_FILTER", hweight32(supported_filters)); if (!prop) return ERR_PTR(-ENOMEM); for (i = 0; i < ARRAY_SIZE(props); i++) { int ret; if (!(BIT(props[i].type) & supported_filters)) continue; ret = drm_property_add_enum(prop, props[i].type, props[i].name); if (ret) { drm_property_destroy(dev, prop); return ERR_PTR(ret); } } return prop; } /** * drm_plane_create_scaling_filter_property - create a new scaling filter * property * * @plane: drm plane * @supported_filters: bitmask of supported scaling filters, must include * BIT(DRM_SCALING_FILTER_DEFAULT). * * This function lets driver to enable the scaling filter property on a given * plane. * * RETURNS: * Zero for success or -errno */ int drm_plane_create_scaling_filter_property(struct drm_plane *plane, unsigned int supported_filters) { struct drm_property *prop = drm_create_scaling_filter_prop(plane->dev, supported_filters); if (IS_ERR(prop)) return PTR_ERR(prop); drm_object_attach_property(&plane->base, prop, DRM_SCALING_FILTER_DEFAULT); plane->scaling_filter_property = prop; return 0; } EXPORT_SYMBOL(drm_plane_create_scaling_filter_property); /** * drm_plane_add_size_hints_property - create a size hints property * * @plane: drm plane * @hints: size hints * @num_hints: number of size hints * * Create a size hints property for the plane. * * RETURNS: * Zero for success or -errno */ int drm_plane_add_size_hints_property(struct drm_plane *plane, const struct drm_plane_size_hint *hints, int num_hints) { struct drm_device *dev = plane->dev; struct drm_mode_config *config = &dev->mode_config; struct drm_property_blob *blob; /* extending to other plane types needs actual thought */ if (drm_WARN_ON(dev, plane->type != DRM_PLANE_TYPE_CURSOR)) return -EINVAL; blob = drm_property_create_blob(dev, array_size(sizeof(hints[0]), num_hints), hints); if (IS_ERR(blob)) return PTR_ERR(blob); drm_object_attach_property(&plane->base, config->size_hints_property, blob->base.id); return 0; } EXPORT_SYMBOL(drm_plane_add_size_hints_property); /** * drm_plane_create_color_pipeline_property - create a new color pipeline * property * * @plane: drm plane * @pipelines: list of pipelines * @num_pipelines: number of pipelines * * Create the COLOR_PIPELINE plane property to specific color pipelines on * the plane. 
* * RETURNS: * Zero for success or -errno */ int drm_plane_create_color_pipeline_property(struct drm_plane *plane, const struct drm_prop_enum_list *pipelines, int num_pipelines) { struct drm_prop_enum_list *all_pipelines; struct drm_property *prop; int len = 0; int i; all_pipelines = kzalloc_objs(*all_pipelines, num_pipelines + 1); if (!all_pipelines) { drm_err(plane->dev, "failed to allocate color pipeline\n"); return -ENOMEM; } /* Create default Bypass color pipeline */ all_pipelines[len].type = 0; all_pipelines[len].name = "Bypass"; len++; /* Add all other color pipelines */ for (i = 0; i < num_pipelines; i++, len++) { all_pipelines[len].type = pipelines[i].type; all_pipelines[len].name = pipelines[i].name; } prop = drm_property_create_enum(plane->dev, DRM_MODE_PROP_ATOMIC, "COLOR_PIPELINE", all_pipelines, len); if (!prop) { kfree(all_pipelines); return -ENOMEM; } drm_object_attach_property(&plane->base, prop, 0); plane->color_pipeline_property = prop; kfree(all_pipelines); return 0; } EXPORT_SYMBOL(drm_plane_create_color_pipeline_property);
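The damage-tracking documentation above describes how drivers consume FB_DAMAGE_CLIPS through the damage iterator helpers. The following is only an illustrative sketch of that pattern, not part of the file above: it assumes the drm_damage_helper interfaces named in the text and a hypothetical driver-internal my_hw_flush_rect() upload routine.

#include <drm/drm_damage_helper.h>
#include <drm/drm_rect.h>

/*
 * Hedged illustration only: walk the damage rectangles of a plane update,
 * clipped to the plane src; when user-space supplied no clips, the iterator
 * yields a single full-plane rectangle.  my_hw_flush_rect() is hypothetical.
 */
static void my_flush_damage(struct drm_plane_state *old_state,
			    struct drm_plane_state *new_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		my_hw_flush_rect(new_state->fb, &clip);	/* hypothetical upload */
}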
// SPDX-License-Identifier: GPL-2.0-or-later /****************************************************************************** * vlanproc.c VLAN Module. /proc filesystem interface. * * This module is completely hardware-independent and provides * access to the router using Linux /proc filesystem. * * Author: Ben Greear, <greearb@candelatech.com> copied from wanproc.c * by: Gene Kozin <genek@compuserve.com> * * Copyright: (c) 1998 Ben Greear * * ============================================================================ * Jan 20, 1998 Ben Greear Initial Version *****************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "vlanproc.h" #include "vlan.h" /****** Function Prototypes *************************************************/ /* Methods for preparing data for reading proc entries */ static int vlan_seq_show(struct seq_file *seq, void *v); static void *vlan_seq_start(struct seq_file *seq, loff_t *pos); static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos); static void vlan_seq_stop(struct seq_file *seq, void *); static int vlandev_seq_show(struct seq_file *seq, void *v); /* * Global Data */ /* * Names of the proc directory entries */ static const char name_root[] = "vlan"; static const char name_conf[] = "config"; /* * Structures for interfacing with the /proc filesystem. * VLAN creates its own directory /proc/net/vlan with the following * entries: * config device status/configuration * <device> entry for each device */ /* * Generic /proc/net/vlan/<file> file and inode operations */ static const struct seq_operations vlan_seq_ops = { .start = vlan_seq_start, .next = vlan_seq_next, .stop = vlan_seq_stop, .show = vlan_seq_show, }; /* * Proc filesystem directory entries.
*/ /* Strings */ static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = { [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID", [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID", }; /* * Interface functions */ /* * Clean up /proc/net/vlan entries */ void vlan_proc_cleanup(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); if (vn->proc_vlan_conf) remove_proc_entry(name_conf, vn->proc_vlan_dir); if (vn->proc_vlan_dir) remove_proc_entry(name_root, net->proc_net); /* Dynamically added entries should be cleaned up as their vlan_device * is removed, so we should not have to take care of it here... */ } /* * Create /proc/net/vlan entries */ int __net_init vlan_proc_init(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net); if (!vn->proc_vlan_dir) goto err; vn->proc_vlan_conf = proc_create_net(name_conf, S_IFREG | 0600, vn->proc_vlan_dir, &vlan_seq_ops, sizeof(struct seq_net_private)); if (!vn->proc_vlan_conf) goto err; return 0; err: pr_err("can't create entry in proc filesystem!\n"); vlan_proc_cleanup(net); return -ENOBUFS; } /* * Add directory entry for VLAN device. */ int vlan_proc_add_dev(struct net_device *vlandev) { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); if (!strcmp(vlandev->name, name_conf)) return -EINVAL; vlan->dent = proc_create_single_data(vlandev->name, S_IFREG | 0600, vn->proc_vlan_dir, vlandev_seq_show, vlandev); if (!vlan->dent) return -ENOBUFS; return 0; } /* * Delete directory entry for VLAN device. */ void vlan_proc_rem_dev(struct net_device *vlandev) { /** NOTE: This will consume the memory pointed to by dent, it seems. */ proc_remove(vlan_dev_priv(vlandev)->dent); vlan_dev_priv(vlandev)->dent = NULL; } /****** Proc filesystem entry points ****************************************/ /* * The following few functions build the content of /proc/net/vlan/config */ static void *vlan_seq_from_index(struct seq_file *seq, loff_t *pos) { unsigned long ifindex = *pos; struct net_device *dev; for_each_netdev_dump(seq_file_net(seq), dev, ifindex) { if (!is_vlan_dev(dev)) continue; *pos = dev->ifindex; return dev; } return NULL; } static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; return vlan_seq_from_index(seq, pos); } static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return vlan_seq_from_index(seq, pos); } static void vlan_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { rcu_read_unlock(); } static int vlan_seq_show(struct seq_file *seq, void *v) { struct net *net = seq_file_net(seq); struct vlan_net *vn = net_generic(net, vlan_net_id); if (v == SEQ_START_TOKEN) { const char *nmtype = NULL; seq_puts(seq, "VLAN Dev name | VLAN ID\n"); if (vn->name_type < ARRAY_SIZE(vlan_name_type_str)) nmtype = vlan_name_type_str[vn->name_type]; seq_printf(seq, "Name-Type: %s\n", nmtype ? 
nmtype : "UNKNOWN"); } else { const struct net_device *vlandev = v; const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); seq_printf(seq, "%-15s| %d | %s\n", vlandev->name, vlan->vlan_id, vlan->real_dev->name); } return 0; } static int vlandev_seq_show(struct seq_file *seq, void *offset) { struct net_device *vlandev = (struct net_device *) seq->private; const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats; static const char fmt64[] = "%30s %12llu\n"; int i; if (!is_vlan_dev(vlandev)) return 0; stats = dev_get_stats(vlandev, &temp); seq_printf(seq, "%s VID: %d REORDER_HDR: %i dev->priv_flags: %x\n", vlandev->name, vlan->vlan_id, (int)(vlan->flags & 1), (u32)vlandev->priv_flags); seq_printf(seq, fmt64, "total frames received", stats->rx_packets); seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast); seq_puts(seq, "\n"); seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); seq_printf(seq, "Device: %s", vlan->real_dev->name); /* now show all PRIORITY mappings relating to this VLAN */ seq_printf(seq, "\nINGRESS priority mappings: " "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n", vlan->ingress_priority_map[0], vlan->ingress_priority_map[1], vlan->ingress_priority_map[2], vlan->ingress_priority_map[3], vlan->ingress_priority_map[4], vlan->ingress_priority_map[5], vlan->ingress_priority_map[6], vlan->ingress_priority_map[7]); seq_printf(seq, " EGRESS priority mappings: "); for (i = 0; i < 16; i++) { const struct vlan_priority_tci_mapping *mp = vlan->egress_priority_map[i]; while (mp) { seq_printf(seq, "%u:%d ", mp->priority, ((mp->vlan_qos >> 13) & 0x7)); mp = mp->next; } } seq_puts(seq, "\n"); return 0; }
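For reference, vlan_seq_show() above produces /proc/net/vlan/config output of roughly the following shape; the device names and VLAN IDs below are made-up examples, not output captured from a real system:

VLAN Dev name | VLAN ID
Name-Type: VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD
eth0.100       | 100 | eth0
eth0.200       | 200 | eth0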
// SPDX-License-Identifier: GPL-2.0 /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level.
* * The options processing module for ip.c * * Authors: A.N.Kuznetsov * */ #define pr_fmt(fmt) "IPv4: " fmt #include <linux/capability.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/unaligned.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/route.h> #include <net/cipso_ipv4.h> #include <net/ip_fib.h> /* * Write options to IP header, record destination address to * source route option, address of outgoing interface * (we should already know it, so this function may be * called only after the routing decision) and timestamp, * if we originate this datagram. * * daddr is the real destination address; the next hop is recorded in the IP header. * saddr is the address of the outgoing interface. */ void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt) { unsigned char *iph = skb_network_header(skb); memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options)); memcpy(iph + sizeof(struct iphdr), opt->__data, opt->optlen); opt = &(IPCB(skb)->opt); if (opt->srr) memcpy(iph + opt->srr + iph[opt->srr + 1] - 4, &daddr, 4); if (opt->rr_needaddr) ip_rt_get_source(iph + opt->rr + iph[opt->rr + 2] - 5, skb, rt); if (opt->ts_needaddr) ip_rt_get_source(iph + opt->ts + iph[opt->ts + 2] - 9, skb, rt); if (opt->ts_needtime) { __be32 midtime; midtime = inet_current_timestamp(); memcpy(iph + opt->ts + iph[opt->ts + 2] - 5, &midtime, 4); } } /* * Provided (sopt, skb) points to received options, * build in dopt a compiled option set appropriate for answering, * i.e. invert the SRR option, copy the others, * and grab room in the RR/TS options. * * NOTE: dopt cannot point to skb.
*/ int __ip_options_echo(struct net *net, struct ip_options *dopt, struct sk_buff *skb, const struct ip_options *sopt) { unsigned char *sptr, *dptr; int soffset, doffset; int optlen; memset(dopt, 0, sizeof(struct ip_options)); if (sopt->optlen == 0) return 0; sptr = skb_network_header(skb); dptr = dopt->__data; if (sopt->rr) { optlen = sptr[sopt->rr+1]; soffset = sptr[sopt->rr+2]; dopt->rr = dopt->optlen + sizeof(struct iphdr); memcpy(dptr, sptr+sopt->rr, optlen); if (sopt->rr_needaddr && soffset <= optlen) { if (soffset + 3 > optlen) return -EINVAL; dptr[2] = soffset + 4; dopt->rr_needaddr = 1; } dptr += optlen; dopt->optlen += optlen; } if (sopt->ts) { optlen = sptr[sopt->ts+1]; soffset = sptr[sopt->ts+2]; dopt->ts = dopt->optlen + sizeof(struct iphdr); memcpy(dptr, sptr+sopt->ts, optlen); if (soffset <= optlen) { if (sopt->ts_needaddr) { if (soffset + 3 > optlen) return -EINVAL; dopt->ts_needaddr = 1; soffset += 4; } if (sopt->ts_needtime) { if (soffset + 3 > optlen) return -EINVAL; if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) { dopt->ts_needtime = 1; soffset += 4; } else { dopt->ts_needtime = 0; if (soffset + 7 <= optlen) { __be32 addr; memcpy(&addr, dptr+soffset-1, 4); if (inet_addr_type(net, addr) != RTN_UNICAST) { dopt->ts_needtime = 1; soffset += 8; } } } } dptr[2] = soffset; } dptr += optlen; dopt->optlen += optlen; } if (sopt->srr) { unsigned char *start = sptr+sopt->srr; __be32 faddr; optlen = start[1]; soffset = start[2]; doffset = 0; if (soffset > optlen) soffset = optlen + 1; soffset -= 4; if (soffset > 3) { memcpy(&faddr, &start[soffset-1], 4); for (soffset -= 4, doffset = 4; soffset > 3; soffset -= 4, doffset += 4) memcpy(&dptr[doffset-1], &start[soffset-1], 4); /* * RFC1812 requires to fix illegal source routes. */ if (memcmp(&ip_hdr(skb)->saddr, &start[soffset + 3], 4) == 0) doffset -= 4; } if (doffset > 3) { dopt->faddr = faddr; dptr[0] = start[0]; dptr[1] = doffset+3; dptr[2] = 4; dptr += doffset+3; dopt->srr = dopt->optlen + sizeof(struct iphdr); dopt->optlen += doffset+3; dopt->is_strictroute = sopt->is_strictroute; } } if (sopt->cipso) { optlen = sptr[sopt->cipso+1]; dopt->cipso = dopt->optlen+sizeof(struct iphdr); memcpy(dptr, sptr+sopt->cipso, optlen); dptr += optlen; dopt->optlen += optlen; } while (dopt->optlen & 3) { *dptr++ = IPOPT_END; dopt->optlen++; } return 0; } /* * Options "fragmenting", just fill options not * allowed in fragments with NOOPs. * Simple and stupid 8), but the most efficient way. */ void ip_options_fragment(struct sk_buff *skb) { unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); struct ip_options *opt = &(IPCB(skb)->opt); int l = opt->optlen; int optlen; while (l > 0) { switch (*optptr) { case IPOPT_END: return; case IPOPT_NOOP: l--; optptr++; continue; } optlen = optptr[1]; if (optlen < 2 || optlen > l) return; if (!IPOPT_COPIED(*optptr)) memset(optptr, IPOPT_NOOP, optlen); l -= optlen; optptr += optlen; } opt->ts = 0; opt->rr = 0; opt->rr_needaddr = 0; opt->ts_needaddr = 0; opt->ts_needtime = 0; } /* helper used by ip_options_compile() to call fib_compute_spec_dst() * at most one time. */ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb) { if (*spec_dst == htonl(INADDR_ANY)) *spec_dst = fib_compute_spec_dst(skb); } /* * Verify options and fill pointers in struct options. * Caller should clear *opt, and set opt->data. * If opt == NULL, then skb->data should point to IP header. 
*/ int __ip_options_compile(struct net *net, struct ip_options *opt, struct sk_buff *skb, __be32 *info) { __be32 spec_dst = htonl(INADDR_ANY); unsigned char *pp_ptr = NULL; struct rtable *rt = NULL; unsigned char *optptr; unsigned char *iph; int optlen, l; if (skb) { rt = skb_rtable(skb); optptr = (unsigned char *)&(ip_hdr(skb)[1]); } else optptr = opt->__data; iph = optptr - sizeof(struct iphdr); for (l = opt->optlen; l > 0; ) { switch (*optptr) { case IPOPT_END: for (optptr++, l--; l > 0; optptr++, l--) { if (*optptr != IPOPT_END) { *optptr = IPOPT_END; opt->is_changed = 1; } } goto eol; case IPOPT_NOOP: l--; optptr++; continue; } if (unlikely(l < 2)) { pp_ptr = optptr; goto error; } optlen = optptr[1]; if (optlen < 2 || optlen > l) { pp_ptr = optptr; goto error; } switch (*optptr) { case IPOPT_SSRR: case IPOPT_LSRR: if (optlen < 3) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 4) { pp_ptr = optptr + 2; goto error; } /* NB: cf RFC-1812 5.2.4.1 */ if (opt->srr) { pp_ptr = optptr; goto error; } if (!skb) { if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) { pp_ptr = optptr + 1; goto error; } memcpy(&opt->faddr, &optptr[3], 4); if (optlen > 7) memmove(&optptr[3], &optptr[7], optlen-7); } opt->is_strictroute = (optptr[0] == IPOPT_SSRR); opt->srr = optptr - iph; break; case IPOPT_RR: if (opt->rr) { pp_ptr = optptr; goto error; } if (optlen < 3) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 4) { pp_ptr = optptr + 2; goto error; } if (optptr[2] <= optlen) { if (optptr[2]+3 > optlen) { pp_ptr = optptr + 2; goto error; } if (rt) { spec_dst_fill(&spec_dst, skb); memcpy(&optptr[optptr[2]-1], &spec_dst, 4); opt->is_changed = 1; } optptr[2] += 4; opt->rr_needaddr = 1; } opt->rr = optptr - iph; break; case IPOPT_TIMESTAMP: if (opt->ts) { pp_ptr = optptr; goto error; } if (optlen < 4) { pp_ptr = optptr + 1; goto error; } if (optptr[2] < 5) { pp_ptr = optptr + 2; goto error; } if (optptr[2] <= optlen) { unsigned char *timeptr = NULL; if (optptr[2]+3 > optlen) { pp_ptr = optptr + 2; goto error; } switch (optptr[3]&0xF) { case IPOPT_TS_TSONLY: if (skb) timeptr = &optptr[optptr[2]-1]; opt->ts_needtime = 1; optptr[2] += 4; break; case IPOPT_TS_TSANDADDR: if (optptr[2]+7 > optlen) { pp_ptr = optptr + 2; goto error; } if (rt) { spec_dst_fill(&spec_dst, skb); memcpy(&optptr[optptr[2]-1], &spec_dst, 4); timeptr = &optptr[optptr[2]+3]; } opt->ts_needaddr = 1; opt->ts_needtime = 1; optptr[2] += 8; break; case IPOPT_TS_PRESPEC: if (optptr[2]+7 > optlen) { pp_ptr = optptr + 2; goto error; } { __be32 addr; memcpy(&addr, &optptr[optptr[2]-1], 4); if (inet_addr_type(net, addr) == RTN_UNICAST) break; if (skb) timeptr = &optptr[optptr[2]+3]; } opt->ts_needtime = 1; optptr[2] += 8; break; default: if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) { pp_ptr = optptr + 3; goto error; } break; } if (timeptr) { __be32 midtime; midtime = inet_current_timestamp(); memcpy(timeptr, &midtime, 4); opt->is_changed = 1; } } else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) { unsigned int overflow = optptr[3]>>4; if (overflow == 15) { pp_ptr = optptr + 3; goto error; } if (skb) { optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); opt->is_changed = 1; } } opt->ts = optptr - iph; break; case IPOPT_RA: if (optlen < 4) { pp_ptr = optptr + 1; goto error; } if (optptr[2] == 0 && optptr[3] == 0) opt->router_alert = optptr - iph; break; case IPOPT_CIPSO: if ((!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) || opt->cipso) { pp_ptr = optptr; goto error; } opt->cipso = optptr - iph; if (cipso_v4_validate(skb, &optptr)) { pp_ptr = 
optptr; goto error; } break; case IPOPT_SEC: case IPOPT_SID: default: if (!skb && !ns_capable(net->user_ns, CAP_NET_RAW)) { pp_ptr = optptr; goto error; } break; } l -= optlen; optptr += optlen; } eol: if (!pp_ptr) return 0; error: if (info) *info = htonl((pp_ptr-iph)<<24); return -EINVAL; } EXPORT_SYMBOL(__ip_options_compile); int ip_options_compile(struct net *net, struct ip_options *opt, struct sk_buff *skb) { int ret; __be32 info; ret = __ip_options_compile(net, opt, skb, &info); if (ret != 0 && skb) icmp_send(skb, ICMP_PARAMETERPROB, 0, info); return ret; } EXPORT_SYMBOL(ip_options_compile); /* * Undo all the changes done by ip_options_compile(). */ void ip_options_undo(struct ip_options *opt) { if (opt->srr) { unsigned char *optptr = opt->__data + opt->srr - sizeof(struct iphdr); memmove(optptr + 7, optptr + 3, optptr[1] - 7); memcpy(optptr + 3, &opt->faddr, 4); } if (opt->rr_needaddr) { unsigned char *optptr = opt->__data + opt->rr - sizeof(struct iphdr); optptr[2] -= 4; memset(&optptr[optptr[2] - 1], 0, 4); } if (opt->ts) { unsigned char *optptr = opt->__data + opt->ts - sizeof(struct iphdr); if (opt->ts_needtime) { optptr[2] -= 4; memset(&optptr[optptr[2] - 1], 0, 4); if ((optptr[3] & 0xF) == IPOPT_TS_PRESPEC) optptr[2] -= 4; } if (opt->ts_needaddr) { optptr[2] -= 4; memset(&optptr[optptr[2] - 1], 0, 4); } } } int ip_options_get(struct net *net, struct ip_options_rcu **optp, sockptr_t data, int optlen) { struct ip_options_rcu *opt; opt = kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3), GFP_KERNEL); if (!opt) return -ENOMEM; if (optlen && copy_from_sockptr(opt->opt.__data, data, optlen)) { kfree(opt); return -EFAULT; } while (optlen & 3) opt->opt.__data[optlen++] = IPOPT_END; opt->opt.optlen = optlen; if (optlen && ip_options_compile(net, &opt->opt, NULL)) { kfree(opt); return -EINVAL; } kfree(*optp); *optp = opt; return 0; } void ip_forward_options(struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); unsigned char *optptr; struct rtable *rt = skb_rtable(skb); unsigned char *raw = skb_network_header(skb); if (opt->rr_needaddr) { optptr = (unsigned char *)raw + opt->rr; ip_rt_get_source(&optptr[optptr[2]-5], skb, rt); opt->is_changed = 1; } if (opt->srr_is_hit) { int srrptr, srrspace; optptr = raw + opt->srr; for ( srrptr = optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4 ) { if (srrptr + 3 > srrspace) break; if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0) break; } if (srrptr + 3 <= srrspace) { opt->is_changed = 1; ip_hdr(skb)->daddr = opt->nexthop; ip_rt_get_source(&optptr[srrptr-1], skb, rt); optptr[2] = srrptr+4; } else { net_crit_ratelimited("%s(): Argh! 
Destination lost!\n", __func__); } if (opt->ts_needaddr) { optptr = raw + opt->ts; ip_rt_get_source(&optptr[optptr[2]-9], skb, rt); opt->is_changed = 1; } } if (opt->is_changed) { opt->is_changed = 0; ip_send_check(ip_hdr(skb)); } } int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev) { struct ip_options *opt = &(IPCB(skb)->opt); int srrspace, srrptr; __be32 nexthop; struct iphdr *iph = ip_hdr(skb); unsigned char *optptr = skb_network_header(skb) + opt->srr; struct rtable *rt = skb_rtable(skb); struct rtable *rt2; unsigned long orefdst; int err; if (!rt) return 0; if (skb->pkt_type != PACKET_HOST) return -EINVAL; if (rt->rt_type == RTN_UNICAST) { if (!opt->is_strictroute) return 0; icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl(16<<24)); return -EINVAL; } if (rt->rt_type != RTN_LOCAL) return -EINVAL; for (srrptr = optptr[2], srrspace = optptr[1]; srrptr <= srrspace; srrptr += 4) { if (srrptr + 3 > srrspace) { icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((opt->srr+2)<<24)); return -EINVAL; } memcpy(&nexthop, &optptr[srrptr-1], 4); orefdst = skb_dstref_steal(skb); err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph), dev) ? -EINVAL : 0; rt2 = skb_rtable(skb); if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { skb_dst_drop(skb); skb_dstref_restore(skb, orefdst); return -EINVAL; } refdst_drop(orefdst); if (rt2->rt_type != RTN_LOCAL) break; /* Superfast 8) loopback forward */ iph->daddr = nexthop; opt->is_changed = 1; } if (srrptr <= srrspace) { opt->srr_is_hit = 1; opt->nexthop = nexthop; opt->is_changed = 1; } return 0; } EXPORT_SYMBOL(ip_options_rcv_srr);
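The loops in ip_options_fragment() and __ip_options_compile() above both rely on the same IPv4 option encoding: IPOPT_END and IPOPT_NOOP are single bytes, and every other option is a {type, length, data} TLV whose length byte covers the whole option. A self-contained, hedged sketch of that walk (user-space style, not kernel code; the sample bytes in main() are made up):

#include <stdio.h>

#define IPOPT_END  0	/* end of option list, single byte */
#define IPOPT_NOOP 1	/* padding, single byte */

/* Walk an IPv4 options area of l bytes and print each TLV option. */
static void walk_ip_options(const unsigned char *optptr, int l)
{
	while (l > 0) {
		int optlen;

		switch (*optptr) {
		case IPOPT_END:
			return;			/* nothing follows */
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;		/* single-byte padding */
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)	/* malformed option */
			return;
		printf("option type %u, length %u\n", *optptr, optlen);
		l -= optlen;
		optptr += optlen;
	}
}

int main(void)
{
	/* made-up example: NOOP, then a Record Route option with one slot */
	const unsigned char opts[] = { IPOPT_NOOP, 7, 7, 4, 0, 0, 0, 0, IPOPT_END };

	walk_ip_options(opts, (int)sizeof(opts));
	return 0;
}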
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FAT_H #define _FAT_H #include <linux/buffer_head.h> #include <linux/nls.h> #include <linux/hash.h> #include <linux/ratelimit.h> #include <linux/msdos_fs.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> /* * vfat shortname flags */ #define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */ #define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */ #define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */ #define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */ #define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */ #define FAT_ERRORS_CONT 1 /* ignore error and continue */ #define FAT_ERRORS_PANIC 2 /* panic on error */ #define FAT_ERRORS_RO 3 /* remount r/o on error */ #define FAT_NFS_STALE_RW 1 /* NFS RW support, can cause ESTALE */ #define FAT_NFS_NOSTALE_RO 2 /* NFS RO support, no ESTALE issue */ struct fat_mount_options { kuid_t fs_uid; kgid_t fs_gid; unsigned short fs_fmask; unsigned short fs_dmask; unsigned short codepage; /* Codepage for shortname conversions */ int time_offset; /* Offset of timestamps from UTC (in minutes) */ char *iocharset; /* Charset used for filename input/display */ unsigned short shortname; /* flags for shortname display/create rule */ unsigned char name_check; /* r = relaxed, n = normal, s = strict */ unsigned char errors; /* On error: continue, panic, remount-ro */ unsigned char nfs; /* NFS support: nostale_ro, stale_rw */ unsigned short allow_utime;/* permission for setting the [am]time */ unsigned quiet:1, /* set = fake successful chmods and chowns */ showexec:1, /* set = only set x bit for
com/exe/bat */ sys_immutable:1, /* set = system files are immutable */ dotsOK:1, /* set = hidden and system files are named '.filename' */ isvfat:1, /* 0=no vfat long filename support, 1=vfat support */ utf8:1, /* Use of UTF-8 character set (Default) */ unicode_xlate:1, /* create escape sequences for unhandled Unicode */ numtail:1, /* Does first alias have a numeric '~1' type tail? */ flush:1, /* write things quickly */ nocase:1, /* Does this need case conversion? 0=need case conversion*/ usefree:1, /* Use free_clusters for FAT32 */ tz_set:1, /* Filesystem timestamps' offset set */ rodir:1, /* allow ATTR_RO for directory */ discard:1, /* Issue discard requests on deletions */ dos1xfloppy:1, /* Assume default BPB for DOS 1.x floppies */ debug:1; /* Not currently used */ }; #define FAT_HASH_BITS 8 #define FAT_HASH_SIZE (1UL << FAT_HASH_BITS) /* * MS-DOS file system in-core superblock data */ struct msdos_sb_info { unsigned short sec_per_clus; /* sectors/cluster */ unsigned short cluster_bits; /* log2(cluster_size) */ unsigned int cluster_size; /* cluster size */ unsigned char fats, fat_bits; /* number of FATs, FAT bits (12,16 or 32) */ unsigned short fat_start; unsigned long fat_length; /* FAT start & length (sec.) */ unsigned long dir_start; unsigned short dir_entries; /* root dir start & entries */ unsigned long data_start; /* first data sector */ unsigned long max_cluster; /* maximum cluster number */ unsigned long root_cluster; /* first cluster of the root directory */ unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */ struct mutex fat_lock; struct mutex nfs_build_inode_lock; struct mutex s_lock; unsigned int prev_free; /* previously allocated cluster number */ unsigned int free_clusters; /* -1 if undefined */ unsigned int free_clus_valid; /* is free_clusters valid? 
*/ struct fat_mount_options options; struct nls_table *nls_disk; /* Codepage used on disk */ struct nls_table *nls_io; /* Charset used for input and display */ const void *dir_ops; /* Opaque; default directory operations */ int dir_per_block; /* dir entries per block */ int dir_per_block_bits; /* log2(dir_per_block) */ unsigned int vol_id; /*volume ID*/ int fatent_shift; const struct fatent_operations *fatent_ops; struct inode *fat_inode; struct inode *fsinfo_inode; struct ratelimit_state ratelimit; spinlock_t inode_hash_lock; struct hlist_head inode_hashtable[FAT_HASH_SIZE]; spinlock_t dir_hash_lock; struct hlist_head dir_hashtable[FAT_HASH_SIZE]; unsigned int dirty; /* fs state before mount */ struct rcu_head rcu; }; #define FAT_CACHE_VALID 0 /* special case for valid cache */ /* * MS-DOS file system inode data in memory */ struct msdos_inode_info { spinlock_t cache_lru_lock; struct list_head cache_lru; int nr_caches; /* for avoiding the race between fat_free() and fat_get_cluster() */ unsigned int cache_valid_id; /* NOTE: mmu_private is 64bits, so must hold ->i_mutex to access */ loff_t mmu_private; /* physically allocated size */ int i_start; /* first cluster or 0 */ int i_logstart; /* logical first cluster */ int i_attrs; /* unused attribute bits */ loff_t i_pos; /* on-disk position of directory entry or 0 */ struct hlist_node i_fat_hash; /* hash by i_location */ struct hlist_node i_dir_hash; /* hash by i_logstart */ struct rw_semaphore truncate_lock; /* protect bmap against truncate */ struct timespec64 i_crtime; /* File creation (birth) time */ struct inode vfs_inode; }; struct fat_slot_info { loff_t i_pos; /* on-disk position of directory entry */ loff_t slot_off; /* offset for slot or de start */ int nr_slots; /* number of slots + 1(de) in filename */ struct msdos_dir_entry *de; struct buffer_head *bh; }; static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb) { return sb->s_fs_info; } /* * Functions that determine the variant of the FAT file system (i.e., * whether this is FAT12, FAT16 or FAT32. */ static inline bool is_fat12(const struct msdos_sb_info *sbi) { return sbi->fat_bits == 12; } static inline bool is_fat16(const struct msdos_sb_info *sbi) { return sbi->fat_bits == 16; } static inline bool is_fat32(const struct msdos_sb_info *sbi) { return sbi->fat_bits == 32; } /* Maximum number of clusters */ static inline u32 max_fat(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); return is_fat32(sbi) ? MAX_FAT32 : is_fat16(sbi) ? MAX_FAT16 : MAX_FAT12; } static inline struct msdos_inode_info *MSDOS_I(struct inode *inode) { return container_of(inode, struct msdos_inode_info, vfs_inode); } /* * If ->i_mode can't hold S_IWUGO (i.e. ATTR_RO), we use ->i_attrs to * save ATTR_RO instead of ->i_mode. * * If it's directory and !sbi->options.rodir, ATTR_RO isn't read-only * bit, it's just used as flag for app. */ static inline int fat_mode_can_hold_ro(struct inode *inode) { struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); umode_t mask; if (S_ISDIR(inode->i_mode)) { if (!sbi->options.rodir) return 0; mask = ~sbi->options.fs_dmask; } else mask = ~sbi->options.fs_fmask; if (!(mask & S_IWUGO)) return 0; return 1; } /* Convert attribute bits and a mask to the UNIX mode. 
*/ static inline umode_t fat_make_mode(struct msdos_sb_info *sbi, u8 attrs, umode_t mode) { if (attrs & ATTR_RO && !((attrs & ATTR_DIR) && !sbi->options.rodir)) mode &= ~S_IWUGO; if (attrs & ATTR_DIR) return (mode & ~sbi->options.fs_dmask) | S_IFDIR; else return (mode & ~sbi->options.fs_fmask) | S_IFREG; } /* Return the FAT attribute byte for this inode */ static inline u8 fat_make_attrs(struct inode *inode) { u8 attrs = MSDOS_I(inode)->i_attrs; if (S_ISDIR(inode->i_mode)) attrs |= ATTR_DIR; if (fat_mode_can_hold_ro(inode) && !(inode->i_mode & S_IWUGO)) attrs |= ATTR_RO; return attrs; } static inline void fat_save_attrs(struct inode *inode, u8 attrs) { if (fat_mode_can_hold_ro(inode)) MSDOS_I(inode)->i_attrs = attrs & ATTR_UNUSED; else MSDOS_I(inode)->i_attrs = attrs & (ATTR_UNUSED | ATTR_RO); } static inline unsigned char fat_checksum(const __u8 *name) { unsigned char s = name[0]; s = (s<<7) + (s>>1) + name[1]; s = (s<<7) + (s>>1) + name[2]; s = (s<<7) + (s>>1) + name[3]; s = (s<<7) + (s>>1) + name[4]; s = (s<<7) + (s>>1) + name[5]; s = (s<<7) + (s>>1) + name[6]; s = (s<<7) + (s>>1) + name[7]; s = (s<<7) + (s>>1) + name[8]; s = (s<<7) + (s>>1) + name[9]; s = (s<<7) + (s>>1) + name[10]; return s; } static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus) { return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus + sbi->data_start; } static inline void fat_get_blknr_offset(struct msdos_sb_info *sbi, loff_t i_pos, sector_t *blknr, int *offset) { *blknr = i_pos >> sbi->dir_per_block_bits; *offset = i_pos & (sbi->dir_per_block - 1); } static inline loff_t fat_i_pos_read(struct msdos_sb_info *sbi, struct inode *inode) { loff_t i_pos; #if BITS_PER_LONG == 32 spin_lock(&sbi->inode_hash_lock); #endif i_pos = MSDOS_I(inode)->i_pos; #if BITS_PER_LONG == 32 spin_unlock(&sbi->inode_hash_lock); #endif return i_pos; } static inline void fat16_towchar(wchar_t *dst, const __u8 *src, size_t len) { #ifdef __BIG_ENDIAN while (len--) { *dst++ = src[0] | (src[1] << 8); src += 2; } #else memcpy(dst, src, len * 2); #endif } static inline int fat_get_start(const struct msdos_sb_info *sbi, const struct msdos_dir_entry *de) { int cluster = le16_to_cpu(de->start); if (is_fat32(sbi)) cluster |= (le16_to_cpu(de->starthi) << 16); return cluster; } static inline void fat_set_start(struct msdos_dir_entry *de, int cluster) { de->start = cpu_to_le16(cluster); de->starthi = cpu_to_le16(cluster >> 16); } static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len) { #ifdef __BIG_ENDIAN while (len--) { dst[0] = *src & 0x00FF; dst[1] = (*src & 0xFF00) >> 8; dst += 2; src++; } #else memcpy(dst, src, len * 2); #endif } /* fat/cache.c */ extern void fat_cache_inval_inode(struct inode *inode); extern int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus); extern int fat_get_mapped_cluster(struct inode *inode, sector_t sector, sector_t last_block, unsigned long *mapped_blocks, sector_t *bmap); extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, unsigned long *mapped_blocks, int create, bool from_bmap); /* fat/dir.c */ extern const struct file_operations fat_dir_operations; extern int fat_search_long(struct inode *inode, const unsigned char *name, int name_len, struct fat_slot_info *sinfo); extern int fat_dir_empty(struct inode *dir); extern int fat_subdirs(struct inode *dir); extern int fat_scan(struct inode *dir, const unsigned char *name, struct fat_slot_info *sinfo); extern int fat_scan_logstart(struct inode *dir, int i_logstart, struct 
fat_slot_info *sinfo); extern int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh, struct msdos_dir_entry **de); extern int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts); extern int fat_add_entries(struct inode *dir, void *slots, int nr_slots, struct fat_slot_info *sinfo); extern int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo); /* fat/fatent.c */ struct fat_entry { int entry; union { u8 *ent12_p[2]; __le16 *ent16_p; __le32 *ent32_p; } u; int nr_bhs; struct buffer_head *bhs[2]; struct inode *fat_inode; }; static inline void fatent_init(struct fat_entry *fatent) { fatent->nr_bhs = 0; fatent->entry = 0; fatent->u.ent32_p = NULL; fatent->bhs[0] = fatent->bhs[1] = NULL; fatent->fat_inode = NULL; } static inline void fatent_set_entry(struct fat_entry *fatent, int entry) { fatent->entry = entry; fatent->u.ent32_p = NULL; } static inline void fatent_brelse(struct fat_entry *fatent) { int i; fatent->u.ent32_p = NULL; for (i = 0; i < fatent->nr_bhs; i++) brelse(fatent->bhs[i]); fatent->nr_bhs = 0; fatent->bhs[0] = fatent->bhs[1] = NULL; fatent->fat_inode = NULL; } static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry) { return FAT_START_ENT <= entry && entry < sbi->max_cluster; } extern void fat_ent_access_init(struct super_block *sb); extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry); extern int fat_ent_write(struct inode *inode, struct fat_entry *fatent, int new, int wait); extern int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster); extern int fat_free_clusters(struct inode *inode, int cluster); extern int fat_count_free_clusters(struct super_block *sb); extern int fat_trim_fs(struct inode *inode, struct fstrim_range *range); /* fat/file.c */ extern long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern const struct file_operations fat_file_operations; extern const struct inode_operations fat_file_inode_operations; extern int fat_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr); extern void fat_truncate_blocks(struct inode *inode, loff_t offset); extern int fat_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags); extern int fat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); /* fat/inode.c */ extern int fat_block_truncate_page(struct inode *inode, loff_t from); extern void fat_attach(struct inode *inode, loff_t i_pos); extern void fat_detach(struct inode *inode); extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos); extern struct inode *fat_build_inode(struct super_block *sb, struct msdos_dir_entry *de, loff_t i_pos); extern int fat_sync_inode(struct inode *inode); extern int fat_fill_super(struct super_block *sb, struct fs_context *fc, void (*setup)(struct super_block *)); extern int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de); extern int fat_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2); extern const struct fs_parameter_spec fat_param_spec[]; int fat_init_fs_context(struct fs_context *fc, bool is_vfat); void fat_free_fc(struct fs_context *fc); int fat_parse_param(struct fs_context *fc, struct fs_parameter *param, bool is_vfat); int fat_reconfigure(struct fs_context *fc); static inline unsigned long fat_dir_hash(int logstart) { return hash_32(logstart, FAT_HASH_BITS); } extern int fat_add_cluster(struct inode *inode); /* fat/misc.c */ extern __printf(3, 4) __cold void 
__fat_fs_error(struct super_block *sb, int report, const char *fmt, ...); #define fat_fs_error(sb, fmt, args...) \ __fat_fs_error(sb, 1, fmt , ## args) #define fat_fs_error_ratelimit(sb, fmt, args...) \ __fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args) #define FAT_PRINTK_PREFIX "%sFAT-fs (%s): " #define fat_msg(sb, level, fmt, args...) \ do { \ printk_index_subsys_emit(FAT_PRINTK_PREFIX, level, fmt, ##args);\ _fat_msg(sb, level, fmt, ##args); \ } while (0) __printf(3, 4) __cold void _fat_msg(struct super_block *sb, const char *level, const char *fmt, ...); #define fat_msg_ratelimit(sb, level, fmt, args...) \ do { \ if (__ratelimit(&MSDOS_SB(sb)->ratelimit)) \ fat_msg(sb, level, fmt, ## args); \ } while (0) extern int fat_clusters_flush(struct super_block *sb); extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster); extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts, __le16 __time, __le16 __date, u8 time_cs); extern void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts, __le16 *time, __le16 *date, u8 *time_cs); extern struct timespec64 fat_truncate_atime(const struct msdos_sb_info *sbi, const struct timespec64 *ts); #define FAT_UPDATE_ATIME (1u << 0) #define FAT_UPDATE_CMTIME (1u << 1) void fat_truncate_time(struct inode *inode, struct timespec64 *now, unsigned int flags); int fat_update_time(struct inode *inode, enum fs_update_time type, unsigned int flags); extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs); int fat_cache_init(void); void fat_cache_destroy(void); /* fat/nfs.c */ extern const struct export_operations fat_export_ops; extern const struct export_operations fat_export_ops_nostale; /* helper for printk */ typedef unsigned long long llu; #endif /* !_FAT_H */
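fat_checksum() above implements the classic 8.3 short-name checksum that VFAT long-name directory slots carry so stale long-name entries can be detected. A self-contained, hedged sketch of the same rotate-and-add computation, using a made-up name purely as an example:

#include <stdio.h>

/* Same arithmetic as fat_checksum(): rotate right by one, add next byte. */
static unsigned char shortname_checksum(const unsigned char name[11])
{
	unsigned char s = 0;
	int i;

	for (i = 0; i < 11; i++)
		s = (unsigned char)((s << 7) + (s >> 1) + name[i]);
	return s;
}

int main(void)
{
	/* "FOO     TXT" is the space-padded 11-byte form of FOO.TXT */
	const unsigned char name[11] = "FOO     TXT";

	printf("checksum: 0x%02x\n", shortname_checksum(name));
	return 0;
}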
// SPDX-License-Identifier: GPL-2.0-or-later /* DataCenter TCP (DCTCP) congestion control. * * http://simula.stanford.edu/~alizade/Site/DCTCP.html * * This is an implementation of DCTCP over Reno, an enhancement to the * TCP congestion control algorithm designed for data centers. DCTCP * leverages Explicit Congestion Notification (ECN) in the network to * provide multi-bit feedback to the end hosts. DCTCP's goal is to meet * the following three data center transport requirements: * * - High burst tolerance (incast due to partition/aggregate) * - Low latency (short flows, queries) * - High throughput (continuous data updates, large file transfers) * with commodity shallow buffered switches * * The algorithm is described in detail in the following two papers: * * 1) Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye, * Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan: * "Data Center TCP (DCTCP)", Data Center Networks session * Proc. ACM SIGCOMM, New Delhi, 2010. * http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf * * 2) Mohammad Alizadeh, Adel Javanmard, and Balaji Prabhakar: * "Analysis of DCTCP: Stability, Convergence, and Fairness" * Proc. ACM SIGMETRICS, San Jose, 2011. * http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp_analysis-full.pdf * * Initial prototype from Abdul Kabbani, Masato Yasuda and Mohammad Alizadeh.
* * Authors: * * Daniel Borkmann <dborkman@redhat.com> * Florian Westphal <fw@strlen.de> * Glenn Judd <glenn.judd@morganstanley.com> */ #include <linux/btf.h> #include <linux/btf_ids.h> #include <linux/module.h> #include <linux/mm.h> #include <net/tcp.h> #include <linux/inet_diag.h> #include "tcp_dctcp.h" #define DCTCP_MAX_ALPHA 1024U struct dctcp { u32 old_delivered; u32 old_delivered_ce; u32 prior_rcv_nxt; u32 dctcp_alpha; u32 next_seq; u32 ce_state; u32 loss_cwnd; struct tcp_plb_state plb; }; static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ static int dctcp_shift_g_set(const char *val, const struct kernel_param *kp) { return param_set_uint_minmax(val, kp, 0, 10); } static const struct kernel_param_ops dctcp_shift_g_ops = { .set = dctcp_shift_g_set, .get = param_get_uint, }; module_param_cb(dctcp_shift_g, &dctcp_shift_g_ops, &dctcp_shift_g, 0644); MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha"); static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA; module_param(dctcp_alpha_on_init, uint, 0644); MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value"); static struct tcp_congestion_ops dctcp_reno; static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) { ca->next_seq = tp->snd_nxt; ca->old_delivered = tp->delivered; ca->old_delivered_ce = tp->delivered_ce; } __bpf_kfunc static void dctcp_init(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); if (tcp_ecn_mode_any(tp) || (sk->sk_state == TCP_LISTEN || sk->sk_state == TCP_CLOSE)) { struct dctcp *ca = inet_csk_ca(sk); ca->prior_rcv_nxt = tp->rcv_nxt; ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); ca->loss_cwnd = 0; ca->ce_state = 0; dctcp_reset(tp, ca); tcp_plb_init(sk, &ca->plb); return; } /* No ECN support? Fall back to Reno. Also need to clear * ECT from sk since it is set during 3WHS for DCTCP. */ inet_csk(sk)->icsk_ca_ops = &dctcp_reno; INET_ECN_dontxmit(sk); } __bpf_kfunc static u32 dctcp_ssthresh(struct sock *sk) { struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); ca->loss_cwnd = tcp_snd_cwnd(tp); return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U); } __bpf_kfunc static void dctcp_update_alpha(struct sock *sk, u32 flags) { const struct tcp_sock *tp = tcp_sk(sk); struct dctcp *ca = inet_csk_ca(sk); /* Expired RTT */ if (!before(tp->snd_una, ca->next_seq)) { u32 delivered = tp->delivered - ca->old_delivered; u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce; u32 alpha = ca->dctcp_alpha; u32 ce_ratio = 0; if (delivered > 0) { /* dctcp_alpha keeps EWMA of fraction of ECN marked * packets. Because of EWMA smoothing, PLB reaction can * be slow so we use ce_ratio which is an instantaneous * measure of congestion. ce_ratio is the fraction of * ECN marked packets in the previous RTT. */ if (delivered_ce > 0) ce_ratio = (delivered_ce << TCP_PLB_SCALE) / delivered; tcp_plb_update_state(sk, &ca->plb, (int)ce_ratio); tcp_plb_check_rehash(sk, &ca->plb); } /* alpha = (1 - g) * alpha + g * F */ alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g); if (delivered_ce) { /* If dctcp_shift_g == 1, a 32bit value would overflow * after 8 M packets. */ delivered_ce <<= (10 - dctcp_shift_g); delivered_ce /= max(1U, delivered); alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA); } /* dctcp_alpha can be read from dctcp_get_info() without * synchro, so we ask compiler to not use dctcp_alpha * as a temporary variable in prior operations. 
*/ WRITE_ONCE(ca->dctcp_alpha, alpha); dctcp_reset(tp, ca); } } static void dctcp_react_to_loss(struct sock *sk) { struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); ca->loss_cwnd = tcp_snd_cwnd(tp); tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U); } __bpf_kfunc static void dctcp_state(struct sock *sk, u8 new_state) { if (new_state == TCP_CA_Recovery && new_state != inet_csk(sk)->icsk_ca_state) dctcp_react_to_loss(sk); /* We handle RTO in dctcp_cwnd_event to ensure that we perform only * one loss-adjustment per RTT. */ } __bpf_kfunc static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) { struct dctcp *ca = inet_csk_ca(sk); switch (ev) { case CA_EVENT_ECN_IS_CE: case CA_EVENT_ECN_NO_CE: dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); break; case CA_EVENT_LOSS: tcp_plb_update_state_upon_rto(sk, &ca->plb); dctcp_react_to_loss(sk); break; case CA_EVENT_TX_START: tcp_plb_check_rehash(sk, &ca->plb); /* Maybe rehash when inflight is 0 */ break; default: /* Don't care for the rest. */ break; } } static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, union tcp_cc_info *info) { const struct dctcp *ca = inet_csk_ca(sk); const struct tcp_sock *tp = tcp_sk(sk); /* Fill it also in case of VEGASINFO due to req struct limits. * We can still correctly retrieve it later. */ if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) || ext & (1 << (INET_DIAG_VEGASINFO - 1))) { memset(&info->dctcp, 0, sizeof(info->dctcp)); if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) { info->dctcp.dctcp_enabled = 1; info->dctcp.dctcp_ce_state = (u16) ca->ce_state; info->dctcp.dctcp_alpha = ca->dctcp_alpha; info->dctcp.dctcp_ab_ecn = tp->mss_cache * (tp->delivered_ce - ca->old_delivered_ce); info->dctcp.dctcp_ab_tot = tp->mss_cache * (tp->delivered - ca->old_delivered); } *attr = INET_DIAG_DCTCPINFO; return sizeof(info->dctcp); } return 0; } __bpf_kfunc static u32 dctcp_cwnd_undo(struct sock *sk) { const struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); return max(tcp_snd_cwnd(tp), ca->loss_cwnd); } static struct tcp_congestion_ops dctcp __read_mostly = { .init = dctcp_init, .in_ack_event = dctcp_update_alpha, .cwnd_event = dctcp_cwnd_event, .ssthresh = dctcp_ssthresh, .cong_avoid = tcp_reno_cong_avoid, .undo_cwnd = dctcp_cwnd_undo, .set_state = dctcp_state, .get_info = dctcp_get_info, .flags = TCP_CONG_NEEDS_ECN, .owner = THIS_MODULE, .name = "dctcp", }; static struct tcp_congestion_ops dctcp_reno __read_mostly = { .ssthresh = tcp_reno_ssthresh, .cong_avoid = tcp_reno_cong_avoid, .undo_cwnd = tcp_reno_undo_cwnd, .get_info = dctcp_get_info, .owner = THIS_MODULE, .name = "dctcp-reno", }; BTF_KFUNCS_START(tcp_dctcp_check_kfunc_ids) BTF_ID_FLAGS(func, dctcp_init) BTF_ID_FLAGS(func, dctcp_update_alpha) BTF_ID_FLAGS(func, dctcp_cwnd_event) BTF_ID_FLAGS(func, dctcp_ssthresh) BTF_ID_FLAGS(func, dctcp_cwnd_undo) BTF_ID_FLAGS(func, dctcp_state) BTF_KFUNCS_END(tcp_dctcp_check_kfunc_ids) static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = { .owner = THIS_MODULE, .set = &tcp_dctcp_check_kfunc_ids, }; static int __init dctcp_register(void) { int ret; BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE); ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_dctcp_kfunc_set); if (ret < 0) return ret; return tcp_register_congestion_control(&dctcp); } static void __exit dctcp_unregister(void) { tcp_unregister_congestion_control(&dctcp); } module_init(dctcp_register); module_exit(dctcp_unregister); MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>"); 
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>"); MODULE_AUTHOR("Glenn Judd <glenn.judd@morganstanley.com>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("DataCenter TCP (DCTCP)");
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_UIDGID_H
#define _LINUX_UIDGID_H

/*
 * A set of types for the internal kernel types representing uids and gids.
 *
 * The types defined in this header allow distinguishing which uids and gids in
 * the kernel are values used by userspace and which uid and gid values are
 * the internal kernel values. With the addition of user namespaces the values
 * can be different. Using the type system makes it possible for the compiler
 * to detect when we overlook these differences.
 *
 */
#include <linux/uidgid_types.h>
#include <linux/highuid.h>

struct user_namespace;
extern struct user_namespace init_user_ns;
struct uid_gid_map;

#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }

#ifdef CONFIG_MULTIUSER
static inline uid_t __kuid_val(kuid_t uid)
{
	return uid.val;
}

static inline gid_t __kgid_val(kgid_t gid)
{
	return gid.val;
}
#else
static inline uid_t __kuid_val(kuid_t uid)
{
	return 0;
}

static inline gid_t __kgid_val(kgid_t gid)
{
	return 0;
}
#endif

#define GLOBAL_ROOT_UID KUIDT_INIT(0)
#define GLOBAL_ROOT_GID KGIDT_INIT(0)

#define INVALID_UID KUIDT_INIT(-1)
#define INVALID_GID KGIDT_INIT(-1)

static inline bool uid_eq(kuid_t left, kuid_t right)
{
	return __kuid_val(left) == __kuid_val(right);
}

static inline bool gid_eq(kgid_t left, kgid_t right)
{
	return __kgid_val(left) == __kgid_val(right);
}

static inline bool uid_gt(kuid_t left, kuid_t right)
{
	return __kuid_val(left) > __kuid_val(right);
}

static inline bool gid_gt(kgid_t left, kgid_t right)
{
	return __kgid_val(left) > __kgid_val(right);
}

static inline bool uid_gte(kuid_t left, kuid_t right)
{
	return __kuid_val(left) >= __kuid_val(right);
}

static inline bool gid_gte(kgid_t left, kgid_t right)
{
	return __kgid_val(left) >= __kgid_val(right);
}

static inline bool uid_lt(kuid_t left, kuid_t right)
{
	return __kuid_val(left) < __kuid_val(right);
}

static inline bool gid_lt(kgid_t left, kgid_t right)
{
	return __kgid_val(left) < __kgid_val(right);
}

static inline bool uid_lte(kuid_t left, kuid_t right)
{
	return __kuid_val(left) <= __kuid_val(right);
}

static inline bool gid_lte(kgid_t left, kgid_t right)
{
	return __kgid_val(left) <= __kgid_val(right);
}

static inline bool uid_valid(kuid_t uid)
{
	return __kuid_val(uid) != (uid_t) -1;
}

static inline bool gid_valid(kgid_t gid)
{
	return __kgid_val(gid) != (gid_t) -1;
}

#ifdef CONFIG_USER_NS

extern kuid_t make_kuid(struct user_namespace *from, uid_t uid);
extern kgid_t make_kgid(struct user_namespace *from, gid_t gid);

extern uid_t from_kuid(struct user_namespace *to, kuid_t uid);
extern gid_t from_kgid(struct user_namespace *to, kgid_t gid);
extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid);
extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid);

static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
{
	return from_kuid(ns, uid) != (uid_t) -1;
}

static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
{
	return from_kgid(ns, gid) != (gid_t) -1;
}

u32 map_id_down(struct uid_gid_map *map, u32 id);
u32 map_id_up(struct uid_gid_map *map, u32 id);
u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count);

#else

static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid)
{
	return KUIDT_INIT(uid);
}

static inline kgid_t make_kgid(struct user_namespace *from, gid_t gid)
{
	return KGIDT_INIT(gid);
}

static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid)
{
	return __kuid_val(kuid);
}

static inline gid_t from_kgid(struct user_namespace *to, kgid_t kgid)
{
	return __kgid_val(kgid);
}

static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid)
{
	uid_t uid = from_kuid(to, kuid);
	if (uid == (uid_t)-1)
		uid = overflowuid;
	return uid;
}

static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid)
{
	gid_t gid = from_kgid(to, kgid);
	if (gid == (gid_t)-1)
		gid = overflowgid;
	return gid;
}

static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
{
	return uid_valid(uid);
}

static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
{
	return gid_valid(gid);
}

static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
{
	return id;
}

static inline u32 map_id_range_up(struct uid_gid_map *map, u32 id, u32 count)
{
	return id;
}

static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
{
	return id;
}

#endif /* CONFIG_USER_NS */

#endif /* _LINUX_UIDGID_H */
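To make the header comment's type-safety argument concrete, here is a small standalone userspace sketch of the same "wrap the integer in a one-member struct" pattern used by kuid_t above. The demo_* names and the single toy mapping range are invented for illustration; they are not part of the kernel API.

/* Standalone illustration, not the kernel header: because demo_kuid_t is a
 * distinct struct type, passing a raw namespace-relative uid where a
 * kernel-internal uid is expected fails to compile instead of silently
 * mixing the two value spaces.
 */
#include <stdio.h>
#include <stdbool.h>

typedef unsigned int uid_t_demo;			/* stand-in for uid_t */
typedef struct { uid_t_demo val; } demo_kuid_t;		/* stand-in for kuid_t */

#define DEMO_KUIDT_INIT(value)	(demo_kuid_t){ value }
#define DEMO_INVALID_UID	DEMO_KUIDT_INIT(-1)

static inline uid_t_demo demo_kuid_val(demo_kuid_t uid)
{
	return uid.val;
}

static inline bool demo_uid_eq(demo_kuid_t l, demo_kuid_t r)
{
	return demo_kuid_val(l) == demo_kuid_val(r);
}

/* Toy single-range mapping: namespace uids 0..999 map to kernel 100000.. */
static demo_kuid_t demo_make_kuid(uid_t_demo ns_uid)
{
	if (ns_uid < 1000)
		return DEMO_KUIDT_INIT(100000 + ns_uid);
	return DEMO_INVALID_UID;
}

int main(void)
{
	demo_kuid_t root_in_ns = demo_make_kuid(0);

	printf("ns uid 0 -> kernel uid %u\n", demo_kuid_val(root_in_ns));
	printf("equal to global root? %d\n",
	       demo_uid_eq(root_in_ns, DEMO_KUIDT_INIT(0)));
	return 0;
}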
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#ifndef TB_H_
#define TB_H_

#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/bitfield.h>

#include "tb_regs.h"
#include "ctl.h" #include "dma_port.h" /* Keep link controller awake during update */ #define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) /* Disable CLx if not supported */ #define QUIRK_NO_CLX BIT(1) /* Need to keep power on while USB4 port is in redrive mode */ #define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2) /** * struct tb_nvm - Structure holding NVM information * @dev: Owner of the NVM * @major: Major version number of the active NVM portion * @minor: Minor version number of the active NVM portion * @id: Identifier used with both NVM portions * @active: Active portion NVMem device * @active_size: Size in bytes of the active NVM * @non_active: Non-active portion NVMem device * @buf: Buffer where the NVM image is stored before it is written to * the actual NVM flash device * @buf_data_start: Where the actual image starts after skipping * possible headers * @buf_data_size: Number of bytes actually consumed by the new NVM * image * @authenticating: The device is authenticating the new NVM * @flushed: The image has been flushed to the storage area * @vops: Router vendor specific NVM operations (optional) * * The user of this structure needs to handle serialization of possible * concurrent access. */ struct tb_nvm { struct device *dev; u32 major; u32 minor; int id; struct nvmem_device *active; size_t active_size; struct nvmem_device *non_active; void *buf; void *buf_data_start; size_t buf_data_size; bool authenticating; bool flushed; const struct tb_nvm_vendor_ops *vops; }; enum tb_nvm_write_ops { WRITE_AND_AUTHENTICATE = 1, WRITE_ONLY = 2, AUTHENTICATE_ONLY = 3, }; #define TB_SWITCH_KEY_SIZE 32 #define TB_SWITCH_MAX_DEPTH 6 #define USB4_SWITCH_MAX_DEPTH 5 /** * enum tb_switch_tmu_mode - TMU mode * @TB_SWITCH_TMU_MODE_OFF: TMU is off * @TB_SWITCH_TMU_MODE_LOWRES: Uni-directional, normal mode * @TB_SWITCH_TMU_MODE_HIFI_UNI: Uni-directional, HiFi mode * @TB_SWITCH_TMU_MODE_HIFI_BI: Bi-directional, HiFi mode * @TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: Enhanced Uni-directional, MedRes mode * * Ordering is based on TMU accuracy level (highest last). */ enum tb_switch_tmu_mode { TB_SWITCH_TMU_MODE_OFF, TB_SWITCH_TMU_MODE_LOWRES, TB_SWITCH_TMU_MODE_HIFI_UNI, TB_SWITCH_TMU_MODE_HIFI_BI, TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI, }; /** * struct tb_switch_tmu - Structure holding router TMU configuration * @cap: Offset to the TMU capability (%0 if not found) * @has_ucap: Does the switch support uni-directional mode * @mode: TMU mode related to the upstream router. Reflects the HW * setting. Don't care for host router. * @mode_request: TMU mode requested to set. Related to upstream router. * Don't care for host router. */ struct tb_switch_tmu { int cap; bool has_ucap; enum tb_switch_tmu_mode mode; enum tb_switch_tmu_mode mode_request; }; /** * struct tb_switch - a thunderbolt switch * @dev: Device for the switch * @config: Switch configuration * @ports: Ports in this switch * @dma_port: If the switch has port supporting DMA configuration based * mailbox this will hold the pointer to that (%NULL * otherwise). If set it also means the switch has * upgradeable NVM. 
* @tmu: The switch TMU configuration * @tb: Pointer to the domain the switch belongs to * @uid: Unique ID of the switch * @uuid: UUID of the switch (or %NULL if not supported) * @vendor: Vendor ID of the switch * @device: Device ID of the switch * @vendor_name: Name of the vendor (or %NULL if not known) * @device_name: Name of the device (or %NULL if not known) * @link_speed: Speed of the link in Gb/s * @link_width: Width of the upstream facing link * @preferred_link_width: Router preferred link width (only set for Gen 4 links) * @link_usb4: Upstream link is USB4 * @generation: Switch Thunderbolt generation * @cap_plug_events: Offset to the plug events capability (%0 if not found) * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found) * @cap_lc: Offset to the link controller capability (%0 if not found) * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found) * @is_unplugged: The switch is going away * @drom: DROM of the switch (%NULL if not found) * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise) * @no_nvm_upgrade: Prevent NVM upgrade of this switch * @safe_mode: The switch is in safe-mode * @boot: Whether the switch was already authorized on boot or not * @rpm: The switch supports runtime PM * @authorized: Whether the switch is authorized by user or policy * @security_level: Switch supported security level * @debugfs_dir: Pointer to the debugfs structure * @key: Contains the key used to challenge the device or %NULL if not * supported. Size of the key is %TB_SWITCH_KEY_SIZE. * @connection_id: Connection ID used with ICM messaging * @connection_key: Connection key used with ICM messaging * @link: Root switch link this switch is connected (ICM only) * @depth: Depth in the chain this switch is connected (ICM only) * @rpm_complete: Completion used to wait for runtime resume to * complete (ICM only) * @quirks: Quirks used for this Thunderbolt switch * @credit_allocation: Are the below buffer allocation parameters valid * @max_usb3_credits: Router preferred number of buffers for USB 3.x * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN * @max_pcie_credits: Router preferred number of buffers for PCIe * @max_dma_credits: Router preferred number of buffers for DMA/P2P * @clx: CLx states on the upstream link of the router * @drom_blob: DROM debugfs blob wrapper * * When the switch is being added or removed to the domain (other * switches) you need to have domain lock held. * * In USB4 terminology this structure represents a router. 
*/ struct tb_switch { struct device dev; struct tb_regs_switch_header config; struct tb_port *ports; struct tb_dma_port *dma_port; struct tb_switch_tmu tmu; struct tb *tb; u64 uid; uuid_t *uuid; u16 vendor; u16 device; const char *vendor_name; const char *device_name; unsigned int link_speed; enum tb_link_width link_width; enum tb_link_width preferred_link_width; bool link_usb4; unsigned int generation; int cap_plug_events; int cap_vsec_tmu; int cap_lc; int cap_lp; bool is_unplugged; u8 *drom; struct tb_nvm *nvm; bool no_nvm_upgrade; bool safe_mode; bool boot; bool rpm; unsigned int authorized; enum tb_security_level security_level; struct dentry *debugfs_dir; u8 *key; u8 connection_id; u8 connection_key; u8 link; u8 depth; struct completion rpm_complete; unsigned long quirks; bool credit_allocation; unsigned int max_usb3_credits; unsigned int min_dp_aux_credits; unsigned int min_dp_main_credits; unsigned int max_pcie_credits; unsigned int max_dma_credits; unsigned int clx; #ifdef CONFIG_DEBUG_FS struct debugfs_blob_wrapper drom_blob; #endif }; /** * struct tb_bandwidth_group - Bandwidth management group * @tb: Pointer to the domain the group belongs to * @index: Index of the group (aka Group_ID). Valid values %1-%7 * @ports: DP IN adapters belonging to this group are linked here * @reserved: Bandwidth released by one tunnel in the group, available * to others. This is reported as part of estimated_bw for * the group. * @release_work: Worker to release the @reserved if it is not used by * any of the tunnels. * * Any tunnel that requires isochronous bandwidth (that's DP for now) is * attached to a bandwidth group. All tunnels going through the same * USB4 links share the same group and can dynamically distribute the * bandwidth within the group. */ struct tb_bandwidth_group { struct tb *tb; int index; struct list_head ports; int reserved; struct delayed_work release_work; }; /** * struct tb_port - a thunderbolt port, part of a tb_switch * @config: Cached port configuration read from registers * @sw: Switch the port belongs to * @remote: Remote port (%NULL if not connected) * @xdomain: Remote host (%NULL if not connected) * @cap_phy: Offset, zero if not found * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present) * @cap_adap: Offset of the adapter specific capability (%0 if not present) * @cap_usb4: Offset to the USB4 port capability (%0 if not present) * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0) * @port: Port number on switch * @disabled: Disabled by eeprom or enabled but not implemented * @bonded: true if the port is bonded (two lanes combined as one) * @dual_link_port: If the switch is connected using two ports, points * to the other port. * @link_nr: Is this primary or secondary port on the dual_link. * @in_hopids: Currently allocated input HopIDs * @out_hopids: Currently allocated output HopIDs * @list: Used to link ports to DP resources list * @total_credits: Total number of buffers available for this port * @ctl_credits: Buffers reserved for control path * @dma_credits: Number of credits allocated for DMA tunneling for all * DMA paths through this port. * @group: Bandwidth allocation group the adapter is assigned to. Only * used for DP IN adapters for now. * @group_list: The adapter is linked to the group's list of ports through this * @max_bw: Maximum possible bandwidth through this adapter if set to * non-zero. * @redrive: For DP IN, if true the adapter is in redrive mode. 
* * In USB4 terminology this structure represents an adapter (protocol or * lane adapter). */ struct tb_port { struct tb_regs_port_header config; struct tb_switch *sw; struct tb_port *remote; struct tb_xdomain *xdomain; int cap_phy; int cap_tmu; int cap_adap; int cap_usb4; struct usb4_port *usb4; u8 port; bool disabled; bool bonded; struct tb_port *dual_link_port; u8 link_nr:1; struct ida in_hopids; struct ida out_hopids; struct list_head list; unsigned int total_credits; unsigned int ctl_credits; unsigned int dma_credits; struct tb_bandwidth_group *group; struct list_head group_list; unsigned int max_bw; bool redrive; }; /** * struct usb4_port - USB4 port device * @dev: Device for the port * @port: Pointer to the lane 0 adapter * @can_offline: Does the port have necessary platform support to move * it into offline mode and back * @offline: The port is currently in offline mode * @margining: Pointer to margining structure if enabled */ struct usb4_port { struct device dev; struct tb_port *port; bool can_offline; bool offline; #ifdef CONFIG_USB4_DEBUGFS_MARGINING struct tb_margining *margining; #endif }; /** * struct tb_retimer - Thunderbolt retimer * @dev: Device for the retimer * @tb: Pointer to the domain the retimer belongs to * @index: Retimer index facing the router USB4 port * @vendor: Vendor ID of the retimer * @device: Device ID of the retimer * @port: Pointer to the lane 0 adapter * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise) * @no_nvm_upgrade: Prevent NVM upgrade of this retimer * @auth_status: Status of last NVM authentication * @margining: Pointer to margining structure if enabled */ struct tb_retimer { struct device dev; struct tb *tb; u8 index; u32 vendor; u32 device; struct tb_port *port; struct tb_nvm *nvm; bool no_nvm_upgrade; u32 auth_status; #ifdef CONFIG_USB4_DEBUGFS_MARGINING struct tb_margining *margining; #endif }; /** * struct tb_path_hop - routing information for a tb_path * @in_port: Ingress port of a switch * @out_port: Egress port of a switch where the packet is routed out * (must be on the same switch as @in_port) * @in_hop_index: HopID where the path configuration entry is placed in * the path config space of @in_port. * @in_counter_index: Used counter index (not used in the driver * currently, %-1 to disable) * @next_hop_index: HopID of the packet when it is routed out from @out_port * @initial_credits: Number of initial flow control credits allocated for * the path * @nfc_credits: Number of non-flow controlled buffers allocated for the * @in_port. * @pm_support: Set path PM packet support bit to 1 (for USB4 v2 routers) * * Hop configuration is always done on the IN port of a switch. * in_port and out_port have to be on the same switch. Packets arriving on * in_port with "hop" = in_hop_index will get routed to through out_port. The * next hop to take (on out_port->remote) is determined by * next_hop_index. When routing packet to another switch (out->remote is * set) the @next_hop_index must match the @in_hop_index of that next * hop to make routing possible. * * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in * port. 
*/ struct tb_path_hop { struct tb_port *in_port; struct tb_port *out_port; int in_hop_index; int in_counter_index; int next_hop_index; unsigned int initial_credits; unsigned int nfc_credits; bool pm_support; }; /** * enum tb_path_port - path options mask * @TB_PATH_NONE: Do not activate on any hop on path * @TB_PATH_SOURCE: Activate on the first hop (out of src) * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last) * @TB_PATH_DESTINATION: Activate on the last hop (into dst) * @TB_PATH_ALL: Activate on all hops on the path */ enum tb_path_port { TB_PATH_NONE = 0, TB_PATH_SOURCE = 1, TB_PATH_INTERNAL = 2, TB_PATH_DESTINATION = 4, TB_PATH_ALL = 7, }; /** * struct tb_path - a unidirectional path between two ports * @tb: Pointer to the domain structure * @name: Name of the path (used for debugging) * @ingress_shared_buffer: Shared buffering used for ingress ports on the path * @egress_shared_buffer: Shared buffering used for egress ports on the path * @ingress_fc_enable: Flow control for ingress ports on the path * @egress_fc_enable: Flow control for egress ports on the path * @priority: Priority group if the path * @weight: Weight of the path inside the priority group * @drop_packages: Drop packages from queue tail or head * @activated: Is the path active * @clear_fc: Clear all flow control from the path config space entries * when deactivating this path * @hops: Path hops * @path_length: How many hops the path uses * @alloc_hopid: Does this path consume port HopID * * A path consists of a number of hops (see &struct tb_path_hop). To * establish a PCIe tunnel two paths have to be created between the two * PCIe ports. */ struct tb_path { struct tb *tb; const char *name; enum tb_path_port ingress_shared_buffer; enum tb_path_port egress_shared_buffer; enum tb_path_port ingress_fc_enable; enum tb_path_port egress_fc_enable; unsigned int priority:3; int weight:4; bool drop_packages; bool activated; bool clear_fc; struct tb_path_hop *hops; int path_length; bool alloc_hopid; }; /* HopIDs 0-7 are reserved by the Thunderbolt protocol */ #define TB_PATH_MIN_HOPID 8 /* * Support paths from the farthest (depth 6) router to the host and back * to the same level (not necessarily to the same router). */ #define TB_PATH_MAX_HOPS (7 * 2) /* Possible wake types */ #define TB_WAKE_ON_CONNECT BIT(0) #define TB_WAKE_ON_DISCONNECT BIT(1) #define TB_WAKE_ON_USB4 BIT(2) #define TB_WAKE_ON_USB3 BIT(3) #define TB_WAKE_ON_PCIE BIT(4) #define TB_WAKE_ON_DP BIT(5) /* CL states */ #define TB_CL0S BIT(0) #define TB_CL1 BIT(1) #define TB_CL2 BIT(2) /** * struct tb_cm_ops - Connection manager specific operations vector * @driver_ready: Called right after control channel is started. Used by * ICM to send driver ready message to the firmware. * @start: Starts the domain * @stop: Stops the domain * @deinit: Perform any cleanup after the domain is stopped but before * it is unregistered. Called without @tb->lock taken. Optional. 
* @suspend_noirq: Connection manager specific suspend_noirq * @resume_noirq: Connection manager specific resume_noirq * @suspend: Connection manager specific suspend * @freeze_noirq: Connection manager specific freeze_noirq * @thaw_noirq: Connection manager specific thaw_noirq * @complete: Connection manager specific complete * @runtime_suspend: Connection manager specific runtime_suspend * @runtime_resume: Connection manager specific runtime_resume * @runtime_suspend_switch: Runtime suspend a switch * @runtime_resume_switch: Runtime resume a switch * @handle_event: Handle thunderbolt event * @get_boot_acl: Get boot ACL list * @set_boot_acl: Set boot ACL list * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel) * @approve_switch: Approve switch * @add_switch_key: Add key to switch * @challenge_switch_key: Challenge switch using key * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update * @approve_xdomain_paths: Approve (establish) XDomain DMA paths * @disconnect_xdomain_paths: Disconnect XDomain DMA paths * @usb4_switch_op: Optional proxy for USB4 router operations. If set * this will be called whenever USB4 router operation is * performed. If this returns %-EOPNOTSUPP then the * native USB4 router operation is called. * @usb4_switch_nvm_authenticate_status: Optional callback that the CM * implementation can use to return * status of USB4 NVM_AUTH router * operation. */ struct tb_cm_ops { int (*driver_ready)(struct tb *tb); int (*start)(struct tb *tb, bool reset); void (*stop)(struct tb *tb); void (*deinit)(struct tb *tb); int (*suspend_noirq)(struct tb *tb); int (*resume_noirq)(struct tb *tb); int (*suspend)(struct tb *tb); int (*freeze_noirq)(struct tb *tb); int (*thaw_noirq)(struct tb *tb); void (*complete)(struct tb *tb); int (*runtime_suspend)(struct tb *tb); int (*runtime_resume)(struct tb *tb); int (*runtime_suspend_switch)(struct tb_switch *sw); int (*runtime_resume_switch)(struct tb_switch *sw); void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type, const void *buf, size_t size); int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids); int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids); int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw); int (*approve_switch)(struct tb *tb, struct tb_switch *sw); int (*add_switch_key)(struct tb *tb, struct tb_switch *sw); int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw, const u8 *challenge, u8 *response); int (*disconnect_pcie_paths)(struct tb *tb); int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring); int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring); int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata, u8 *status, const void *tx_data, size_t tx_data_len, void *rx_data, size_t rx_data_len); int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw, u32 *status); }; static inline void *tb_priv(struct tb *tb) { return (void *)tb->privdata; } #define TB_AUTOSUSPEND_DELAY 15000 /* ms */ /* helper functions & macros */ /** * tb_upstream_port() - return the upstream port of a switch * @sw: Router * * Every switch has an upstream port (for the root switch it is the NHI). * * During switch alloc/init tb_upstream_port()->remote may be NULL, even for * non root switches (on the NHI port remote is always NULL). * * Return: Pointer to &struct tb_port. 
*/ static inline struct tb_port *tb_upstream_port(struct tb_switch *sw) { return &sw->ports[sw->config.upstream_port_number]; } /** * tb_is_upstream_port() - Is the port upstream facing * @port: Port to check * * Return: %true if @port is upstream facing port. In case of dual link * ports, both return %true. */ static inline bool tb_is_upstream_port(const struct tb_port *port) { const struct tb_port *upstream_port = tb_upstream_port(port->sw); return port == upstream_port || port->dual_link_port == upstream_port; } static inline u64 tb_route(const struct tb_switch *sw) { return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo; } static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw) { u8 port; port = route >> (sw->config.depth * 8); if (WARN_ON(port > sw->config.max_port_number)) return NULL; return &sw->ports[port]; } static inline const char *tb_width_name(enum tb_link_width width) { switch (width) { case TB_LINK_WIDTH_SINGLE: return "symmetric, single lane"; case TB_LINK_WIDTH_DUAL: return "symmetric, dual lanes"; case TB_LINK_WIDTH_ASYM_TX: return "asymmetric, 3 transmitters, 1 receiver"; case TB_LINK_WIDTH_ASYM_RX: return "asymmetric, 3 receivers, 1 transmitter"; default: return "unknown"; } } /** * tb_port_has_remote() - Does the port have switch connected downstream * @port: Port to check * * Return: %true only when the port is primary port and has remote set. */ static inline bool tb_port_has_remote(const struct tb_port *port) { if (tb_is_upstream_port(port)) return false; if (!port->remote) return false; if (port->dual_link_port && port->link_nr) return false; return true; } static inline bool tb_port_is_null(const struct tb_port *port) { return port && port->port && port->config.type == TB_TYPE_PORT; } static inline bool tb_port_is_nhi(const struct tb_port *port) { return port && port->config.type == TB_TYPE_NHI; } static inline bool tb_port_is_pcie_down(const struct tb_port *port) { return port && port->config.type == TB_TYPE_PCIE_DOWN; } static inline bool tb_port_is_pcie_up(const struct tb_port *port) { return port && port->config.type == TB_TYPE_PCIE_UP; } static inline bool tb_port_is_dpin(const struct tb_port *port) { return port && port->config.type == TB_TYPE_DP_HDMI_IN; } static inline bool tb_port_is_dpout(const struct tb_port *port) { return port && port->config.type == TB_TYPE_DP_HDMI_OUT; } static inline bool tb_port_is_usb3_down(const struct tb_port *port) { return port && port->config.type == TB_TYPE_USB3_DOWN; } static inline bool tb_port_is_usb3_up(const struct tb_port *port) { return port && port->config.type == TB_TYPE_USB3_UP; } static inline int tb_sw_read(struct tb_switch *sw, void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (sw->is_unplugged) return -ENODEV; return tb_cfg_read(sw->tb->ctl, buffer, tb_route(sw), 0, space, offset, length); } static inline int tb_sw_write(struct tb_switch *sw, const void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (sw->is_unplugged) return -ENODEV; return tb_cfg_write(sw->tb->ctl, buffer, tb_route(sw), 0, space, offset, length); } static inline int tb_port_read(struct tb_port *port, void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (port->sw->is_unplugged) return -ENODEV; return tb_cfg_read(port->sw->tb->ctl, buffer, tb_route(port->sw), port->port, space, offset, length); } static inline int tb_port_write(struct tb_port *port, const void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (port->sw->is_unplugged) return -ENODEV; return 
tb_cfg_write(port->sw->tb->ctl, buffer, tb_route(port->sw), port->port, space, offset, length); } #define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg) #define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg) #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg) #define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg) #define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg) #define __TB_SW_PRINT(level, sw, fmt, arg...) \ do { \ const struct tb_switch *__sw = (sw); \ level(__sw->tb, "%llx: " fmt, \ tb_route(__sw), ## arg); \ } while (0) #define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg) #define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg) #define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg) #define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg) #define __TB_PORT_PRINT(level, _port, fmt, arg...) \ do { \ const struct tb_port *__port = (_port); \ level(__port->sw->tb, "%llx:%u: " fmt, \ tb_route(__port->sw), __port->port, ## arg); \ } while (0) #define tb_port_WARN(port, fmt, arg...) \ __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg) #define tb_port_warn(port, fmt, arg...) \ __TB_PORT_PRINT(tb_warn, port, fmt, ##arg) #define tb_port_info(port, fmt, arg...) \ __TB_PORT_PRINT(tb_info, port, fmt, ##arg) #define tb_port_dbg(port, fmt, arg...) \ __TB_PORT_PRINT(tb_dbg, port, fmt, ##arg) struct tb *icm_probe(struct tb_nhi *nhi); struct tb *tb_probe(struct tb_nhi *nhi); extern const struct device_type tb_domain_type; extern const struct device_type tb_retimer_type; extern const struct device_type tb_switch_type; extern const struct device_type usb4_port_device_type; int tb_domain_init(void); void tb_domain_exit(void); int tb_xdomain_init(void); void tb_xdomain_exit(void); struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize); int tb_domain_add(struct tb *tb, bool reset); void tb_domain_remove(struct tb *tb); int tb_domain_suspend_noirq(struct tb *tb); int tb_domain_resume_noirq(struct tb *tb); int tb_domain_suspend(struct tb *tb); int tb_domain_freeze_noirq(struct tb *tb); int tb_domain_thaw_noirq(struct tb *tb); void tb_domain_complete(struct tb *tb); int tb_domain_runtime_suspend(struct tb *tb); int tb_domain_runtime_resume(struct tb *tb); int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw); int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw); int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw); int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw); int tb_domain_disconnect_pcie_paths(struct tb *tb); int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring); int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring); int tb_domain_disconnect_all_paths(struct tb *tb); static inline struct tb *tb_domain_get(struct tb *tb) { if (tb) get_device(&tb->dev); return tb; } static inline void tb_domain_put(struct tb *tb) { put_device(&tb->dev); } /** * tb_domain_event() - Notify userspace about an event in domain * @tb: Domain where event occurred * @envp: Array of uevent environment strings (can be %NULL) * * This function provides a way to notify userspace about any events * that take place in the domain. 
*/ static inline void tb_domain_event(struct tb *tb, char *envp[]) { kobject_uevent_env(&tb->dev.kobj, KOBJ_CHANGE, envp); } struct tb_nvm *tb_nvm_alloc(struct device *dev); int tb_nvm_read_version(struct tb_nvm *nvm); int tb_nvm_validate(struct tb_nvm *nvm); int tb_nvm_write_headers(struct tb_nvm *nvm); int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read); int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val, size_t bytes); int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write); void tb_nvm_free(struct tb_nvm *nvm); void tb_nvm_exit(void); typedef int (*read_block_fn)(void *, unsigned int, void *, size_t); typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t); int tb_nvm_read_data(unsigned int address, void *buf, size_t size, unsigned int retries, read_block_fn read_block, void *read_block_data); int tb_nvm_write_data(unsigned int address, const void *buf, size_t size, unsigned int retries, write_block_fn write_next_block, void *write_block_data); int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, u64 route); struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route); int tb_switch_configure(struct tb_switch *sw); int tb_switch_configuration_valid(struct tb_switch *sw); int tb_switch_add(struct tb_switch *sw); void tb_switch_remove(struct tb_switch *sw); void tb_switch_suspend(struct tb_switch *sw, bool runtime); int tb_switch_resume(struct tb_switch *sw, bool runtime); int tb_switch_reset(struct tb_switch *sw); int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, u32 value, int timeout_msec); void tb_sw_set_unplugged(struct tb_switch *sw); struct tb_port *tb_switch_find_port(struct tb_switch *sw, enum tb_port_type type); struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth); struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route); /** * tb_switch_for_each_port() - Iterate over each switch port * @sw: Switch whose ports to iterate * @p: Port used as iterator * * Iterates over each switch port skipping the control port (port %0). */ #define tb_switch_for_each_port(sw, p) \ for ((p) = &(sw)->ports[1]; \ (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++) static inline struct tb_switch *tb_switch_get(struct tb_switch *sw) { if (sw) get_device(&sw->dev); return sw; } static inline void tb_switch_put(struct tb_switch *sw) { put_device(&sw->dev); } static inline bool tb_is_switch(const struct device *dev) { return dev->type == &tb_switch_type; } static inline struct tb_switch *tb_to_switch(const struct device *dev) { if (tb_is_switch(dev)) return container_of(dev, struct tb_switch, dev); return NULL; } static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) { return tb_to_switch(sw->dev.parent); } /** * tb_switch_downstream_port() - Return downstream facing port of parent router * @sw: Device router pointer * * Call only for device routers. * * Return: Pointer to &struct tb_port or %NULL in case of failure. */ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw) { if (WARN_ON(!tb_route(sw))) return NULL; return tb_port_at(tb_route(sw), tb_switch_parent(sw)); } /** * tb_switch_depth() - Returns depth of the connected router * @sw: Router * * Return: Router depth level as a number. 
*/ static inline int tb_switch_depth(const struct tb_switch *sw) { return sw->config.depth; } static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) { return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; } static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw) { return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; } static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: return true; } } return false; } static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: return true; } } return false; } static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: return true; } } return false; } static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: return true; } } return false; } static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_TGL_NHI0: case PCI_DEVICE_ID_INTEL_TGL_NHI1: case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: return true; } } return false; } /** * tb_switch_is_icm() - Is the switch handled by ICM firmware * @sw: Switch to check * * In case there is a need to differentiate whether ICM firmware or SW CM * is handling @sw this function can be called. It is valid to call this * after tb_switch_alloc() and tb_switch_configure() has been called * (latter only for SW CM case). * * Return: %true if switch is handled by ICM, %false if handled by * software CM. */ static inline bool tb_switch_is_icm(const struct tb_switch *sw) { return !sw->config.enabled; } int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width); int tb_switch_configure_link(struct tb_switch *sw); void tb_switch_unconfigure_link(struct tb_switch *sw); bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); int tb_switch_tmu_init(struct tb_switch *sw); int tb_switch_tmu_post_time(struct tb_switch *sw); int tb_switch_tmu_disable(struct tb_switch *sw); int tb_switch_tmu_enable(struct tb_switch *sw); int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode); /** * tb_switch_tmu_is_configured() - Is given TMU mode configured * @sw: Router whose mode to check * @mode: Mode to check * * Checks if given router TMU mode is configured to @mode. 
Note the * router TMU might not be enabled to this mode. * * Return: %true if TMU mode is equal to @mode, %false otherwise. */ static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw, enum tb_switch_tmu_mode mode) { return sw->tmu.mode_request == mode; } /** * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled * @sw: Router whose TMU mode to check * * Return: %true if hardware TMU configuration matches the requested * configuration (and is not %TB_SWITCH_TMU_MODE_OFF), %false otherwise. */ static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) { return sw->tmu.mode != TB_SWITCH_TMU_MODE_OFF && sw->tmu.mode == sw->tmu.mode_request; } bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx); int tb_switch_clx_init(struct tb_switch *sw); int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx); int tb_switch_clx_disable(struct tb_switch *sw); /** * tb_switch_clx_is_enabled() - Checks if the CLx is enabled * @sw: Router to check for the CLx * @clx: The CLx states to check for * * Checks if the specified CLx is enabled on the router upstream link. * * Not applicable for a host router. * * Return: %true if any of the given states is enabled, %false otherwise. */ static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw, unsigned int clx) { return sw->clx & clx; } int tb_switch_pcie_l1_enable(struct tb_switch *sw); int tb_switch_xhci_connect(struct tb_switch *sw); void tb_switch_xhci_disconnect(struct tb_switch *sw); int tb_port_state(struct tb_port *port); int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_clear_counter(struct tb_port *port, int counter); int tb_port_unlock(struct tb_port *port); int tb_port_enable(struct tb_port *port); int tb_port_disable(struct tb_port *port); int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid); void tb_port_release_in_hopid(struct tb_port *port, int hopid); int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid); void tb_port_release_out_hopid(struct tb_port *port, int hopid); struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, struct tb_port *prev); /** * tb_port_path_direction_downstream() - Checks if path is directed downstream * @src: Source adapter * @dst: Destination adapter * * Return: %true only if the specified path from source adapter (@src) * to destination adapter (@dst) is directed downstream. */ static inline bool tb_port_path_direction_downstream(const struct tb_port *src, const struct tb_port *dst) { return src->sw->config.depth < dst->sw->config.depth; } static inline bool tb_port_use_credit_allocation(const struct tb_port *port) { return tb_port_is_null(port) && port->sw->credit_allocation; } /** * tb_for_each_port_on_path() - Iterate over each port on path * @src: Source port * @dst: Destination port * @p: Port used as iterator * * Walks over each port on path from @src to @dst. */ #define tb_for_each_port_on_path(src, dst, p) \ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ (p) = tb_next_port_on_path((src), (dst), (p))) /** * tb_for_each_upstream_port_on_path() - Iterate over each upstream port on path * @src: Source port * @dst: Destination port * @p: Port used as iterator * * Walks over each upstream lane adapter on path from @src to @dst. 
*/ #define tb_for_each_upstream_port_on_path(src, dst, p) \ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ (p) = tb_next_port_on_path((src), (dst), (p))) \ if (!tb_port_is_null((p)) || !tb_is_upstream_port((p))) {\ continue; \ } else int tb_port_get_link_speed(struct tb_port *port); int tb_port_get_link_generation(struct tb_port *port); int tb_port_get_link_width(struct tb_port *port); bool tb_port_width_supported(struct tb_port *port, unsigned int width); int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width); int tb_port_lane_bonding_enable(struct tb_port *port); void tb_port_lane_bonding_disable(struct tb_port *port); int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width, int timeout_msec); int tb_port_update_credits(struct tb_port *port); int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset); int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); int tb_port_next_cap(struct tb_port *port, unsigned int offset); bool tb_port_is_enabled(struct tb_port *port); bool tb_usb3_port_is_enabled(struct tb_port *port); int tb_usb3_port_enable(struct tb_port *port, bool enable); bool tb_pci_port_is_enabled(struct tb_port *port); int tb_pci_port_enable(struct tb_port *port, bool enable); int tb_dp_port_hpd_is_active(struct tb_port *port); int tb_dp_port_hpd_clear(struct tb_port *port); int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, unsigned int aux_tx, unsigned int aux_rx); bool tb_dp_port_is_enabled(struct tb_port *port); int tb_dp_port_enable(struct tb_port *port, bool enable); struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, struct tb_port *dst, int dst_hopid, struct tb_port **last, const char *name, bool alloc_hopid); struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, struct tb_port *dst, int dst_hopid, int link_nr, const char *name); void tb_path_free(struct tb_path *path); int tb_path_activate(struct tb_path *path); void tb_path_deactivate(struct tb_path *path); int tb_path_deactivate_hop(struct tb_port *port, int hop_index); bool tb_path_is_invalid(struct tb_path *path); bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port); /** * tb_path_for_each_hop() - Iterate over each hop on path * @path: Path whose hops to iterate * @hop: Hop used as iterator * * Iterates over each hop on path. 
*/ #define tb_path_for_each_hop(path, hop) \ for ((hop) = &(path)->hops[0]; \ (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++) int tb_drom_read(struct tb_switch *sw); int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); int tb_lc_reset_port(struct tb_port *port); int tb_lc_configure_port(struct tb_port *port); void tb_lc_unconfigure_port(struct tb_port *port); int tb_lc_configure_xdomain(struct tb_port *port); void tb_lc_unconfigure_xdomain(struct tb_port *port); int tb_lc_start_lane_initialization(struct tb_port *port); bool tb_lc_is_clx_supported(struct tb_port *port); bool tb_lc_is_usb_plugged(struct tb_port *port); bool tb_lc_is_xhci_connected(struct tb_port *port); int tb_lc_xhci_connect(struct tb_port *port); void tb_lc_xhci_disconnect(struct tb_port *port); int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags); int tb_lc_set_sleep(struct tb_switch *sw); bool tb_lc_lane_bonding_possible(struct tb_switch *sw); bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in); int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in); int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in); int tb_lc_force_power(struct tb_switch *sw); static inline int tb_route_length(u64 route) { return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT; } /** * tb_downstream_route() - get route to downstream switch * @port: Port to check * * Port must not be the upstream port (otherwise a loop is created). * * Return: Route to the switch behind @port. */ static inline u64 tb_downstream_route(struct tb_port *port) { return tb_route(port->sw) | ((u64) port->port << (port->sw->config.depth * 8)); } bool tb_is_xdomain_enabled(void); bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type, const void *buf, size_t size); struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, u64 route, const uuid_t *local_uuid, const uuid_t *remote_uuid); void tb_xdomain_add(struct tb_xdomain *xd); void tb_xdomain_remove(struct tb_xdomain *xd); struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, u8 depth); static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd) { return tb_to_switch(xd->dev.parent); } /** * tb_xdomain_downstream_port() - Return downstream facing port of parent router * @xd: Xdomain pointer * * Return: Pointer to &struct tb_port or %NULL in case of failure. */ static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd) { return tb_port_at(xd->route, tb_xdomain_parent(xd)); } int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf, size_t size); int tb_retimer_scan(struct tb_port *port, bool add); void tb_retimer_remove_all(struct tb_port *port); static inline bool tb_is_retimer(const struct device *dev) { return dev->type == &tb_retimer_type; } static inline struct tb_retimer *tb_to_retimer(struct device *dev) { if (tb_is_retimer(dev)) return container_of(dev, struct tb_retimer, dev); return NULL; } /** * usb4_switch_version() - Returns USB4 version of the router * @sw: Router to check * * Return: Major version of USB4 router (%1 for v1, %2 for v2 and so * on). Can be called to pre-USB4 router too and in that case returns %0. 
 */
static inline unsigned int usb4_switch_version(const struct tb_switch *sw)
{
	return FIELD_GET(USB4_VERSION_MAJOR_MASK, sw->config.thunderbolt_version);
}

/**
 * tb_switch_is_usb4() - Is the switch USB4 compliant
 * @sw: Switch to check
 *
 * Return: %true if the @sw is USB4 compliant router, %false otherwise.
 */
static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
{
	return usb4_switch_version(sw) > 0;
}

void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size);
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime);
int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size);
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address);
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, const void *buf, size_t size);
int usb4_switch_nvm_authenticate(struct tb_switch *sw);
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status);
int usb4_switch_credits_init(struct tb_switch *sw);
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, const struct tb_port *port);
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port);
int usb4_switch_add_ports(struct tb_switch *sw);
void usb4_switch_remove_ports(struct tb_switch *sw);

int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
int usb4_port_reset(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);

bool usb4_port_asym_supported(struct tb_port *port);
int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width);
int usb4_port_asym_start(struct tb_port *port);

/**
 * enum usb4_sb_target - Sideband transaction target
 * @USB4_SB_TARGET_ROUTER: Target is the router itself
 * @USB4_SB_TARGET_PARTNER: Target is partner
 * @USB4_SB_TARGET_RETIMER: Target is retimer
 */
enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,
	USB4_SB_TARGET_PARTNER,
	USB4_SB_TARGET_RETIMER,
};

int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index, u8 reg, void *buf, u8 size);
int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, u8 index, u8 reg, const void *buf, u8 size);

/**
 * enum usb4_margin_sw_error_counter - Software margining error counter operation
 * @USB4_MARGIN_SW_ERROR_COUNTER_NOP: No change in counter setup
 * @USB4_MARGIN_SW_ERROR_COUNTER_CLEAR: Set the error counter to 0, enable counter
 * @USB4_MARGIN_SW_ERROR_COUNTER_START: Start counter, count from last value
 * @USB4_MARGIN_SW_ERROR_COUNTER_STOP: Stop counter, do not clear value
 */
enum usb4_margin_sw_error_counter {
	USB4_MARGIN_SW_ERROR_COUNTER_NOP,
	USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
	USB4_MARGIN_SW_ERROR_COUNTER_START,
	USB4_MARGIN_SW_ERROR_COUNTER_STOP,
};

enum usb4_margining_lane {
	USB4_MARGINING_LANE_RX0 = 0,
	USB4_MARGINING_LANE_RX1 = 1,
	USB4_MARGINING_LANE_RX2 = 2,
	USB4_MARGINING_LANE_ALL = 7,
};

/**
 * struct usb4_port_margining_params - USB4 margining parameters
 * @error_counter: Error counter operation for software margining
 * @ber_level: Current BER level contour value
 * @lanes: Lanes to enable for the margining operation
 * @voltage_time_offset: Offset for voltage / time for software margining
 * @optional_voltage_offset_range: Enable optional extended voltage range
 * @right_high: %false if left/low margin test is performed, %true if right/high
 * @upper_eye: %true if margin test is done on upper eye, %false if done on
 *	       lower eye
 * @time: %true if time margining is used instead of voltage
 */
struct usb4_port_margining_params {
	enum usb4_margin_sw_error_counter error_counter;
	u32 ber_level;
	enum usb4_margining_lane lanes;
	u32 voltage_time_offset;
	bool optional_voltage_offset_range;
	bool right_high;
	bool upper_eye;
	bool time;
};

int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target, u8 index, u32 *caps, size_t ncaps);
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target, u8 index, const struct usb4_port_margining_params *params, u32 *results, size_t nresults);
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target, u8 index, const struct usb4_port_margining_params *params, u32 *results);
int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target, u8 index, u32 *errors);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
int usb4_port_retimer_is_cable(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, unsigned int address);
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address, const void *buf, size_t size);
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, u32 *status);
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, unsigned int address, void *buf, size_t size);

int usb4_usb3_port_max_link_rate(struct tb_port *port);
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw, int *downstream_bw);
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, int *downstream_bw);
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, int *downstream_bw);

int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id);
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port);
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port);
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port, bool supported);
int usb4_dp_port_group_id(struct tb_port *port);
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id);
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes);
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes);
int usb4_dp_port_granularity(struct tb_port *port);
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity);
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw);
int usb4_dp_port_allocated_bandwidth(struct tb_port *port);
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw);
int usb4_dp_port_requested_bandwidth(struct tb_port *port);

int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable);

static inline bool tb_is_usb4_port_device(const struct device *dev)
{
	return dev->type == &usb4_port_device_type;
}

static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev)
{
	if (tb_is_usb4_port_device(dev))
		return container_of(dev, struct usb4_port, dev);
	return NULL;
}

struct usb4_port *usb4_port_device_add(struct tb_port *port);
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);
int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port);

static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4)
{
	return usb4->offline;
}

void tb_check_quirks(struct tb_switch *sw);

#ifdef CONFIG_ACPI
bool tb_acpi_add_links(struct tb_nhi *nhi);

bool tb_acpi_is_native(void);
bool tb_acpi_may_tunnel_usb3(void);
bool tb_acpi_may_tunnel_dp(void);
bool tb_acpi_may_tunnel_pcie(void);
bool tb_acpi_is_xdomain_allowed(void);

int tb_acpi_init(void);
void tb_acpi_exit(void);
int tb_acpi_power_on_retimers(struct tb_port *port);
int tb_acpi_power_off_retimers(struct tb_port *port);
#else
static inline bool tb_acpi_add_links(struct tb_nhi *nhi) { return false; }

static inline bool tb_acpi_is_native(void) { return true; }
static inline bool tb_acpi_may_tunnel_usb3(void) { return true; }
static inline bool tb_acpi_may_tunnel_dp(void) { return true; }
static inline bool tb_acpi_may_tunnel_pcie(void) { return true; }
static inline bool tb_acpi_is_xdomain_allowed(void) { return true; }

static inline int tb_acpi_init(void) { return 0; }
static inline void tb_acpi_exit(void) { }
static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; }
static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; }
#endif

#ifdef CONFIG_DEBUG_FS
void tb_debugfs_init(void);
void tb_debugfs_exit(void);
void tb_switch_debugfs_init(struct tb_switch *sw);
void tb_switch_debugfs_remove(struct tb_switch *sw);
void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
void tb_retimer_debugfs_init(struct tb_retimer *rt);
void tb_retimer_debugfs_remove(struct tb_retimer *rt);
#else
static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
static inline void tb_retimer_debugfs_init(struct tb_retimer *rt) { }
static inline void tb_retimer_debugfs_remove(struct tb_retimer *rt) { }
#endif

#endif
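Illustrative sketch only, not part of the driver: how a caller might gate USB4-specific setup on tb_switch_is_usb4() before using the usb4_switch_* helpers declared above. example_router_setup() is a made-up name; error handling is simplified.

/*
 * Hypothetical caller: perform USB4-only initialization for a router.
 * Uses only functions declared in this header; pre-USB4 (Thunderbolt)
 * routers are assumed to be handled elsewhere.
 */
static int example_router_setup(struct tb_switch *sw)
{
	u64 uid;
	int ret;

	if (!tb_switch_is_usb4(sw))
		return 0;	/* not a USB4 router, nothing to do here */

	/* Read the router UID over the USB4 router operations */
	ret = usb4_switch_read_uid(sw, &uid);
	if (ret)
		return ret;

	/* Then run the normal USB4 router setup sequence */
	return usb4_switch_setup(sw);
}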
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
 * Copyright (C) Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 * Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
 * Copyright (C) 2002 Ralf Baechle DO1GRB (ralf@gnu.org)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static void ax25_heartbeat_expiry(struct timer_list *);
static void ax25_t1timer_expiry(struct timer_list *);
static void ax25_t2timer_expiry(struct timer_list *);
static void ax25_t3timer_expiry(struct timer_list *);
static void ax25_idletimer_expiry(struct timer_list *);

void ax25_setup_timers(ax25_cb *ax25)
{
	timer_setup(&ax25->timer, ax25_heartbeat_expiry, 0);
	timer_setup(&ax25->t1timer, ax25_t1timer_expiry, 0);
	timer_setup(&ax25->t2timer, ax25_t2timer_expiry, 0);
	timer_setup(&ax25->t3timer, ax25_t3timer_expiry, 0);
	timer_setup(&ax25->idletimer, ax25_idletimer_expiry, 0);
}

void ax25_start_heartbeat(ax25_cb *ax25)
{
	mod_timer(&ax25->timer, jiffies + 5 * HZ);
}

void ax25_start_t1timer(ax25_cb *ax25)
{
	mod_timer(&ax25->t1timer, jiffies + ax25->t1);
}

void ax25_start_t2timer(ax25_cb *ax25)
{
	mod_timer(&ax25->t2timer, jiffies + ax25->t2);
}

void ax25_start_t3timer(ax25_cb *ax25)
{
	if (ax25->t3 > 0)
		mod_timer(&ax25->t3timer, jiffies + ax25->t3);
	else
		timer_delete(&ax25->t3timer);
}

void ax25_start_idletimer(ax25_cb *ax25)
{
	if (ax25->idle > 0)
		mod_timer(&ax25->idletimer, jiffies + ax25->idle);
	else
		timer_delete(&ax25->idletimer);
}

void ax25_stop_heartbeat(ax25_cb *ax25)
{
	timer_delete(&ax25->timer);
}

void ax25_stop_t1timer(ax25_cb *ax25)
{
	timer_delete(&ax25->t1timer);
}

void ax25_stop_t2timer(ax25_cb *ax25)
{
	timer_delete(&ax25->t2timer);
}

void ax25_stop_t3timer(ax25_cb *ax25)
{
	timer_delete(&ax25->t3timer);
}

void ax25_stop_idletimer(ax25_cb *ax25)
{
	timer_delete(&ax25->idletimer);
}

int ax25_t1timer_running(ax25_cb *ax25)
{
	return timer_pending(&ax25->t1timer);
}

unsigned long ax25_display_timer(struct timer_list *timer)
{
	long delta = timer->expires - jiffies;

	if (!timer_pending(timer))
		return 0;

	return max(0L, delta);
}

EXPORT_SYMBOL(ax25_display_timer);

static void ax25_heartbeat_expiry(struct timer_list *t)
{
	int proto = AX25_PROTO_STD_SIMPLEX;
	ax25_cb *ax25 = timer_container_of(ax25, t, timer);

	if (ax25->ax25_dev)
		proto = ax25->ax25_dev->values[AX25_VALUES_PROTOCOL];

	switch (proto) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_heartbeat_expiry(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25->ax25_dev->dama.slave)
			ax25_ds_heartbeat_expiry(ax25);
		else
			ax25_std_heartbeat_expiry(ax25);
		break;
#endif
	}
}

static void ax25_t1timer_expiry(struct timer_list *t)
{
	ax25_cb *ax25 = timer_container_of(ax25, t, t1timer);

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_t1timer_expiry(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave)
			ax25_std_t1timer_expiry(ax25);
		break;
#endif
	}
}

static void ax25_t2timer_expiry(struct timer_list *t)
{
	ax25_cb *ax25 = timer_container_of(ax25, t, t2timer);

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_t2timer_expiry(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave)
			ax25_std_t2timer_expiry(ax25);
		break;
#endif
	}
}

static void ax25_t3timer_expiry(struct timer_list *t)
{
	ax25_cb *ax25 = timer_container_of(ax25, t, t3timer);

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_t3timer_expiry(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25->ax25_dev->dama.slave)
			ax25_ds_t3timer_expiry(ax25);
		else
			ax25_std_t3timer_expiry(ax25);
		break;
#endif
	}
}

static void ax25_idletimer_expiry(struct timer_list *t)
{
	ax25_cb *ax25 = timer_container_of(ax25, t, idletimer);

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_idletimer_expiry(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25->ax25_dev->dama.slave)
			ax25_ds_idletimer_expiry(ax25);
		else
			ax25_std_idletimer_expiry(ax25);
		break;
#endif
	}
}
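Illustrative sketch only, not part of the AX.25 code: a typical lifetime of the per-connection timers using the helpers above. example_t1_cycle() and the 10 * HZ interval are assumptions for the sketch.

/*
 * Hypothetical caller: set up the timers once when the control block is
 * created, (re)arm T1 after transmitting an I frame, report the remaining
 * time, and stop T1 when the frame is acknowledged.
 */
static void example_t1_cycle(ax25_cb *ax25)
{
	/* Once, at control-block creation */
	ax25_setup_timers(ax25);

	/* Arm T1 after sending an I frame; ax25->t1 is in jiffies */
	ax25->t1 = 10 * HZ;
	ax25_start_t1timer(ax25);

	if (ax25_t1timer_running(ax25))
		pr_debug("T1 expires in %lu jiffies\n",
			 ax25_display_timer(&ax25->t1timer));

	/* On acknowledgment the retransmission timer is stopped again */
	ax25_stop_t1timer(ax25);
}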
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_DST_OPS_H
#define _NET_DST_OPS_H
#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/cache.h>

struct dst_entry;
struct kmem_cachep;
struct net_device;
struct sk_buff;
struct sock;
struct net;

struct dst_ops {
	unsigned short		family;
	unsigned int		gc_thresh;

	void			(*gc)(struct dst_ops *ops);
	struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
	unsigned int		(*default_advmss)(const struct dst_entry *);
	unsigned int		(*mtu)(const struct dst_entry *);
	u32 *			(*cow_metrics)(struct dst_entry *, unsigned long);
	void			(*destroy)(struct dst_entry *);
	void			(*ifdown)(struct dst_entry *, struct net_device *dev);
	void			(*negative_advice)(struct sock *sk, struct dst_entry *);
	void			(*link_failure)(struct sk_buff *);
	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
					       struct sk_buff *skb, u32 mtu,
					       bool confirm_neigh);
	void			(*redirect)(struct dst_entry *dst, struct sock *sk,
					    struct sk_buff *skb);
	int			(*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct neighbour *	(*neigh_lookup)(const struct dst_entry *dst,
						struct sk_buff *skb,
						const void *daddr);
	void			(*confirm_neigh)(const struct dst_entry *dst,
						 const void *daddr);

	struct kmem_cache	*kmem_cachep;

	struct percpu_counter	pcpuc_entries ____cacheline_aligned_in_smp;
};

static inline int dst_entries_get_fast(struct dst_ops *dst)
{
	return percpu_counter_read_positive(&dst->pcpuc_entries);
}

static inline int dst_entries_get_slow(struct dst_ops *dst)
{
	return percpu_counter_sum_positive(&dst->pcpuc_entries);
}

#define DST_PERCPU_COUNTER_BATCH 32
static inline void dst_entries_add(struct dst_ops *dst, int val)
{
	percpu_counter_add_batch(&dst->pcpuc_entries, val,
				 DST_PERCPU_COUNTER_BATCH);
}

static inline int dst_entries_init(struct dst_ops *dst)
{
	return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
}

static inline void dst_entries_destroy(struct dst_ops *dst)
{
	percpu_counter_destroy(&dst->pcpuc_entries);
}

#endif
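Illustrative sketch only, not taken from any protocol implementation: how the percpu-counter helpers above might be used to account routing cache entries against gc_thresh. example_dst_ops and example_track_entry() are made-up names; AF_INET comes from <linux/socket.h>.

/* Hypothetical per-protocol dst_ops with a garbage-collection threshold. */
static struct dst_ops example_dst_ops = {
	.family		= AF_INET,
	.gc_thresh	= 1024,
};

static int example_track_entry(void)
{
	int ret;

	/* Initialize the per-CPU entry counter before first use */
	ret = dst_entries_init(&example_dst_ops);
	if (ret)
		return ret;

	/* One cache entry allocated */
	dst_entries_add(&example_dst_ops, 1);

	/* The fast (approximate) count is compared against gc_thresh */
	if (dst_entries_get_fast(&example_dst_ops) > example_dst_ops.gc_thresh)
		pr_debug("over gc_thresh, a real protocol would run ->gc()\n");

	/* Entry released; counter torn down when the protocol unregisters */
	dst_entries_add(&example_dst_ops, -1);
	dst_entries_destroy(&example_dst_ops);
	return 0;
}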