Total coverage: 355279 (18%) of 2016023
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OSS compatible sequencer driver
 *
 * seq_oss_readq.c - MIDI input queue
 *
 * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de>
 */

#include "seq_oss_readq.h"
#include "seq_oss_event.h"
#include <sound/seq_oss_legacy.h>
#include "../seq_lock.h"
#include <linux/wait.h>
#include <linux/slab.h>

/*
 * constants
 */
//#define SNDRV_SEQ_OSS_MAX_TIMEOUT	(unsigned long)(-1)
#define SNDRV_SEQ_OSS_MAX_TIMEOUT	(HZ * 3600)

/*
 * prototypes
 */

/*
 * create a read queue
 */
struct seq_oss_readq *
snd_seq_oss_readq_new(struct seq_oss_devinfo *dp, int maxlen)
{
	struct seq_oss_readq *q;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL);
	if (!q->q) {
		kfree(q);
		return NULL;
	}

	q->maxlen = maxlen;
	q->qlen = 0;
	q->head = q->tail = 0;
	init_waitqueue_head(&q->midi_sleep);
	spin_lock_init(&q->lock);
	q->pre_event_timeout = SNDRV_SEQ_OSS_MAX_TIMEOUT;
	q->input_time = (unsigned long)-1;

	return q;
}

/*
 * delete the read queue
 */
void snd_seq_oss_readq_delete(struct seq_oss_readq *q)
{
	if (q) {
		kfree(q->q);
		kfree(q);
	}
}

/*
 * reset the read queue
 */
void snd_seq_oss_readq_clear(struct seq_oss_readq *q)
{
	if (q->qlen) {
		q->qlen = 0;
		q->head = q->tail = 0;
	}
	/* if someone sleeping, wake'em up */
	wake_up(&q->midi_sleep);
	q->input_time = (unsigned long)-1;
}

/*
 * put a midi byte
 */
int snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, int len)
{
	union evrec rec;
	int result;

	memset(&rec, 0, sizeof(rec));
	rec.c[0] = SEQ_MIDIPUTC;
	rec.c[2] = dev;

	while (len-- > 0) {
		rec.c[1] = *data++;
		result = snd_seq_oss_readq_put_event(q, &rec);
		if (result < 0)
			return result;
	}
	return 0;
}

/*
 * put MIDI sysex bytes; the event buffer may be chained, thus it has
 * to be expanded via snd_seq_dump_var_event().
 */
struct readq_sysex_ctx {
	struct seq_oss_readq *readq;
	int dev;
};

static int readq_dump_sysex(void *ptr, void *buf, int count)
{
	struct readq_sysex_ctx *ctx = ptr;

	return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
}

int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
			    struct snd_seq_event *ev)
{
	struct readq_sysex_ctx ctx = {
		.readq = q,
		.dev = dev
	};

	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return 0;
	return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
}

/*
 * copy an event to input queue:
 * return zero if enqueued
 */
int snd_seq_oss_readq_put_event(struct seq_oss_readq *q, union evrec *ev)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->qlen >= q->maxlen - 1) {
		spin_unlock_irqrestore(&q->lock, flags);
		return -ENOMEM;
	}

	memcpy(&q->q[q->tail], ev, sizeof(*ev));
	q->tail = (q->tail + 1) % q->maxlen;
	q->qlen++;

	/* wake up sleeper */
	wake_up(&q->midi_sleep);

	spin_unlock_irqrestore(&q->lock, flags);

	return 0;
}

/*
 * pop queue
 * caller must hold lock
 */
int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec)
{
	if (q->qlen == 0)
		return -EAGAIN;
	memcpy(rec, &q->q[q->head], sizeof(*rec));
	return 0;
}

/*
 * sleep until ready
 */
void snd_seq_oss_readq_wait(struct seq_oss_readq *q)
{
	wait_event_interruptible_timeout(q->midi_sleep,
					 (q->qlen > 0 || q->head == q->tail),
					 q->pre_event_timeout);
}

/*
 * drain one record
 * caller must hold lock
 */
void snd_seq_oss_readq_free(struct seq_oss_readq *q)
{
	if (q->qlen > 0) {
		q->head = (q->head + 1) % q->maxlen;
		q->qlen--;
	}
}

/*
 * polling/select:
 * return non-zero if readq is not empty.
 */
unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *q, struct file *file, poll_table *wait)
{
	poll_wait(file, &q->midi_sleep, wait);
	return q->qlen;
}

/*
 * put a timestamp
 */
int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *q, unsigned long curt, int seq_mode)
{
	if (curt != q->input_time) {
		union evrec rec;

		memset(&rec, 0, sizeof(rec));
		switch (seq_mode) {
		case SNDRV_SEQ_OSS_MODE_SYNTH:
			rec.echo = (curt << 8) | SEQ_WAIT;
			snd_seq_oss_readq_put_event(q, &rec);
			break;
		case SNDRV_SEQ_OSS_MODE_MUSIC:
			rec.t.code = EV_TIMING;
			rec.t.cmd = TMR_WAIT_ABS;
			rec.t.time = curt;
			snd_seq_oss_readq_put_event(q, &rec);
			break;
		}
		q->input_time = curt;
	}
	return 0;
}

#ifdef CONFIG_SND_PROC_FS
/*
 * proc interface
 */
void snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf)
{
	snd_iprintf(buf, "  read queue [%s] length = %d : tick = %ld\n",
		    (waitqueue_active(&q->midi_sleep) ? "sleeping":"running"),
		    q->qlen, q->input_time);
}
#endif /* CONFIG_SND_PROC_FS */
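The read queue above is a fixed-size ring buffer guarded by a spinlock: head and tail wrap modulo maxlen, and one slot is always left free (the "qlen >= maxlen - 1" check). The stand-alone sketch below mirrors that index arithmetic outside the kernel; all toy_* names are invented for illustration and are not part of the driver.

/* Minimal user-space model of the readq ring-buffer arithmetic
 * (hypothetical names, illustrative only).
 */
#include <stdio.h>

#define TOY_QLEN 16

struct toy_evrec { unsigned char c[4]; };

struct toy_readq {
	struct toy_evrec q[TOY_QLEN];
	int head, tail, qlen;
};

static int toy_put(struct toy_readq *q, const struct toy_evrec *ev)
{
	if (q->qlen >= TOY_QLEN - 1)
		return -1;			/* queue full, one slot kept free */
	q->q[q->tail] = *ev;
	q->tail = (q->tail + 1) % TOY_QLEN;
	q->qlen++;
	return 0;
}

static int toy_pick(struct toy_readq *q, struct toy_evrec *ev)
{
	if (q->qlen == 0)
		return -1;			/* nothing queued */
	*ev = q->q[q->head];			/* peek without consuming */
	return 0;
}

static void toy_free(struct toy_readq *q)
{
	if (q->qlen > 0) {			/* drop the record at head */
		q->head = (q->head + 1) % TOY_QLEN;
		q->qlen--;
	}
}

int main(void)
{
	struct toy_readq q = { 0 };
	struct toy_evrec in = { .c = { 0x90, 0x3c, 0x7f, 0 } }, out;

	toy_put(&q, &in);
	if (toy_pick(&q, &out) == 0)
		printf("picked %02x %02x %02x\n", out.c[0], out.c[1], out.c[2]);
	toy_free(&q);
	return 0;
}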
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "statx.h"

struct io_statx {
	struct file			*file;
	int				dfd;
	unsigned int			mask;
	unsigned int			flags;
	struct filename			*filename;
	struct statx __user		*buffer;
};

int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
	const char __user *path;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	sx->dfd = READ_ONCE(sqe->fd);
	sx->mask = READ_ONCE(sqe->len);
	path = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sx->buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	sx->flags = READ_ONCE(sqe->statx_flags);

	sx->filename = getname_uflags(path, sx->flags);

	if (IS_ERR(sx->filename)) {
		int ret = PTR_ERR(sx->filename);

		sx->filename = NULL;
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_statx(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = do_statx(sx->dfd, sx->filename, sx->flags, sx->mask, sx->buffer);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_statx_cleanup(struct io_kiocb *req)
{
	struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);

	if (sx->filename)
		putname(sx->filename);
}
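For context, the SQE fields consumed by io_statx_prep() (fd, len, addr, addr2, statx_flags) are normally filled in from user space. A minimal sketch using liburing follows; it assumes liburing and a glibc that provides struct statx are available, and the path and mask are arbitrary example values, not taken from the source above.

/* Hypothetical user-space sketch (assumes liburing); not part of the
 * kernel sources above.
 */
#define _GNU_SOURCE
#include <fcntl.h>	/* AT_FDCWD */
#include <sys/stat.h>	/* struct statx, STATX_SIZE (glibc >= 2.28) */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct statx stx;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	/* dfd -> sqe->fd, path -> sqe->addr, flags -> sqe->statx_flags,
	 * mask -> sqe->len, &stx -> sqe->addr2, as read by io_statx_prep().
	 */
	io_uring_prep_statx(sqe, AT_FDCWD, "/etc/hostname", 0, STATX_SIZE, &stx);

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		if (cqe->res == 0)
			printf("size: %llu\n", (unsigned long long)stx.stx_size);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}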
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
 * Copyright (C) 2006 Andrey Volkov, Varma Electronics
 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
 */

#include <linux/can/dev.h>
#include <net/rtnetlink.h>

static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
	[IFLA_CAN_STATE] = { .type = NLA_U32 },
	[IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
	[IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
	[IFLA_CAN_RESTART] = { .type = NLA_U32 },
	[IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
	[IFLA_CAN_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
	[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
	[IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
	[IFLA_CAN_DATA_BITTIMING] = {
.len = sizeof(struct can_bittiming) }, [IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) }, [IFLA_CAN_TERMINATION] = { .type = NLA_U16 }, [IFLA_CAN_TDC] = { .type = NLA_NESTED }, [IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED }, }; static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = { [IFLA_CAN_TDC_TDCV_MIN] = { .type = NLA_U32 }, [IFLA_CAN_TDC_TDCV_MAX] = { .type = NLA_U32 }, [IFLA_CAN_TDC_TDCO_MIN] = { .type = NLA_U32 }, [IFLA_CAN_TDC_TDCO_MAX] = { .type = NLA_U32 }, [IFLA_CAN_TDC_TDCF_MIN] = { .type = NLA_U32 }, [IFLA_CAN_TDC_TDCF_MAX] = { .type = NLA_U32 }, [IFLA_CAN_TDC_TDCV] = { .type = NLA_U32 }, [IFLA_CAN_TDC_TDCO] = { .type = NLA_U32 }, [IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 }, }; static int can_validate_bittiming(const struct can_bittiming *bt, struct netlink_ext_ack *extack) { /* sample point is in one-tenth of a percent */ if (bt->sample_point >= 1000) { NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%"); return -EINVAL; } return 0; } static int can_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { bool is_can_fd = false; int err; /* Make sure that valid CAN FD configurations always consist of * - nominal/arbitration bittiming * - data bittiming * - control mode with CAN_CTRLMODE_FD set * - TDC parameters are coherent (details below) */ if (!data) return 0; if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]); u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK; is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD; /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */ if (tdc_flags == CAN_CTRLMODE_TDC_MASK) return -EOPNOTSUPP; /* If one of the CAN_CTRLMODE_TDC_* flag is set then * TDC must be set and vice-versa */ if (!!tdc_flags != !!data[IFLA_CAN_TDC]) return -EOPNOTSUPP; /* If providing TDC parameters, at least TDCO is * needed. 
TDCV is needed if and only if * CAN_CTRLMODE_TDC_MANUAL is set */ if (data[IFLA_CAN_TDC]) { struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1]; err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, data[IFLA_CAN_TDC], can_tdc_policy, extack); if (err) return err; if (tb_tdc[IFLA_CAN_TDC_TDCV]) { if (tdc_flags & CAN_CTRLMODE_TDC_AUTO) return -EOPNOTSUPP; } else { if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL) return -EOPNOTSUPP; } if (!tb_tdc[IFLA_CAN_TDC_TDCO]) return -EOPNOTSUPP; } } if (data[IFLA_CAN_BITTIMING]) { struct can_bittiming bt; memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); err = can_validate_bittiming(&bt, extack); if (err) return err; } if (is_can_fd) { if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING]) return -EOPNOTSUPP; } if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) { if (!is_can_fd) return -EOPNOTSUPP; } if (data[IFLA_CAN_DATA_BITTIMING]) { struct can_bittiming bt; memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt)); err = can_validate_bittiming(&bt, extack); if (err) return err; } return 0; } static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla, struct netlink_ext_ack *extack) { struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1]; struct can_tdc tdc = { 0 }; const struct can_tdc_const *tdc_const = priv->tdc_const; int err; if (!tdc_const || !can_tdc_is_enabled(priv)) return -EOPNOTSUPP; err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla, can_tdc_policy, extack); if (err) return err; if (tb_tdc[IFLA_CAN_TDC_TDCV]) { u32 tdcv = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCV]); if (tdcv < tdc_const->tdcv_min || tdcv > tdc_const->tdcv_max) return -EINVAL; tdc.tdcv = tdcv; } if (tb_tdc[IFLA_CAN_TDC_TDCO]) { u32 tdco = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCO]); if (tdco < tdc_const->tdco_min || tdco > tdc_const->tdco_max) return -EINVAL; tdc.tdco = tdco; } if (tb_tdc[IFLA_CAN_TDC_TDCF]) { u32 tdcf = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCF]); if (tdcf < tdc_const->tdcf_min || tdcf > tdc_const->tdcf_max) return -EINVAL; tdc.tdcf = tdcf; } priv->tdc = tdc; return 0; } static int can_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct can_priv *priv = netdev_priv(dev); u32 tdc_mask = 0; int err; /* We need synchronization with dev->stop() */ ASSERT_RTNL(); if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm; u32 ctrlstatic; u32 maskedflags; /* Do not allow changing controller mode while running */ if (dev->flags & IFF_UP) return -EBUSY; cm = nla_data(data[IFLA_CAN_CTRLMODE]); ctrlstatic = can_get_static_ctrlmode(priv); maskedflags = cm->flags & cm->mask; /* check whether provided bits are allowed to be passed */ if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic)) return -EOPNOTSUPP; /* do not check for static fd-non-iso if 'fd' is disabled */ if (!(maskedflags & CAN_CTRLMODE_FD)) ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO; /* make sure static options are provided by configuration */ if ((maskedflags & ctrlstatic) != ctrlstatic) return -EOPNOTSUPP; /* clear bits to be modified and copy the flag values */ priv->ctrlmode &= ~cm->mask; priv->ctrlmode |= maskedflags; /* CAN_CTRLMODE_FD can only be set when driver supports FD */ if (priv->ctrlmode & CAN_CTRLMODE_FD) { dev->mtu = CANFD_MTU; } else { dev->mtu = CAN_MTU; memset(&priv->data_bittiming, 0, sizeof(priv->data_bittiming)); priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK; memset(&priv->tdc, 0, sizeof(priv->tdc)); } tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK; /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually * exclusive: make 
sure to turn the other one off */ if (tdc_mask) priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK; } if (data[IFLA_CAN_BITTIMING]) { struct can_bittiming bt; /* Do not allow changing bittiming while running */ if (dev->flags & IFF_UP) return -EBUSY; /* Calculate bittiming parameters based on * bittiming_const if set, otherwise pass bitrate * directly via do_set_bitrate(). Bail out if neither * is given. */ if (!priv->bittiming_const && !priv->do_set_bittiming && !priv->bitrate_const) return -EOPNOTSUPP; memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt)); err = can_get_bittiming(dev, &bt, priv->bittiming_const, priv->bitrate_const, priv->bitrate_const_cnt, extack); if (err) return err; if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) { NL_SET_ERR_MSG_FMT(extack, "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps", bt.bitrate, priv->bitrate_max); return -EINVAL; } memcpy(&priv->bittiming, &bt, sizeof(bt)); if (priv->do_set_bittiming) { /* Finally, set the bit-timing registers */ err = priv->do_set_bittiming(dev); if (err) return err; } } if (data[IFLA_CAN_RESTART_MS]) { /* Do not allow changing restart delay while running */ if (dev->flags & IFF_UP) return -EBUSY; priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); } if (data[IFLA_CAN_RESTART]) { /* Do not allow a restart while not running */ if (!(dev->flags & IFF_UP)) return -EINVAL; err = can_restart_now(dev); if (err) return err; } if (data[IFLA_CAN_DATA_BITTIMING]) { struct can_bittiming dbt; /* Do not allow changing bittiming while running */ if (dev->flags & IFF_UP) return -EBUSY; /* Calculate bittiming parameters based on * data_bittiming_const if set, otherwise pass bitrate * directly via do_set_bitrate(). Bail out if neither * is given. */ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming && !priv->data_bitrate_const) return -EOPNOTSUPP; memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(dbt)); err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const, priv->data_bitrate_const, priv->data_bitrate_const_cnt, extack); if (err) return err; if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) { NL_SET_ERR_MSG_FMT(extack, "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps", dbt.bitrate, priv->bitrate_max); return -EINVAL; } memset(&priv->tdc, 0, sizeof(priv->tdc)); if (data[IFLA_CAN_TDC]) { /* TDC parameters are provided: use them */ err = can_tdc_changelink(priv, data[IFLA_CAN_TDC], extack); if (err) { priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK; return err; } } else if (!tdc_mask) { /* Neither of TDC parameters nor TDC flags are * provided: do calculation */ can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt, &priv->ctrlmode, priv->ctrlmode_supported); } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly * turned off. 
TDC is disabled: do nothing */ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt)); if (priv->do_set_data_bittiming) { /* Finally, set the bit-timing registers */ err = priv->do_set_data_bittiming(dev); if (err) return err; } } if (data[IFLA_CAN_TERMINATION]) { const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]); const unsigned int num_term = priv->termination_const_cnt; unsigned int i; if (!priv->do_set_termination) return -EOPNOTSUPP; /* check whether given value is supported by the interface */ for (i = 0; i < num_term; i++) { if (termval == priv->termination_const[i]) break; } if (i >= num_term) return -EINVAL; /* Finally, set the termination value */ err = priv->do_set_termination(dev, termval); if (err) return err; priv->termination = termval; } return 0; } static size_t can_tdc_get_size(const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); size_t size; if (!priv->tdc_const) return 0; size = nla_total_size(0); /* nest IFLA_CAN_TDC */ if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) { size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */ } size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */ if (priv->tdc_const->tdcf_max) { size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */ } if (can_tdc_is_enabled(priv)) { if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL || priv->do_get_auto_tdcv) size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */ if (priv->tdc_const->tdcf_max) size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */ } return size; } static size_t can_ctrlmode_ext_get_size(void) { return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */ nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */ } static size_t can_get_size(const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); size_t size = 0; if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */ size += nla_total_size(sizeof(struct can_bittiming)); if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ size += nla_total_size(sizeof(struct can_bittiming_const)); size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ size += nla_total_size(sizeof(struct can_berr_counter)); if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */ size += nla_total_size(sizeof(struct can_bittiming)); if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */ size += nla_total_size(sizeof(struct can_bittiming_const)); if (priv->termination_const) { size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */ priv->termination_const_cnt); } if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */ size += nla_total_size(sizeof(*priv->bitrate_const) * priv->bitrate_const_cnt); if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */ size += nla_total_size(sizeof(*priv->data_bitrate_const) * priv->data_bitrate_const_cnt); size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */ size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */ size += 
can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */ return size; } static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct nlattr *nest; struct can_priv *priv = netdev_priv(dev); struct can_tdc *tdc = &priv->tdc; const struct can_tdc_const *tdc_const = priv->tdc_const; if (!tdc_const) return 0; nest = nla_nest_start(skb, IFLA_CAN_TDC); if (!nest) return -EMSGSIZE; if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL && (nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) || nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max))) goto err_cancel; if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MIN, tdc_const->tdco_min) || nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MAX, tdc_const->tdco_max)) goto err_cancel; if (tdc_const->tdcf_max && (nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MIN, tdc_const->tdcf_min) || nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max))) goto err_cancel; if (can_tdc_is_enabled(priv)) { u32 tdcv; int err = -EINVAL; if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) { tdcv = tdc->tdcv; err = 0; } else if (priv->do_get_auto_tdcv) { err = priv->do_get_auto_tdcv(dev, &tdcv); } if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv)) goto err_cancel; if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO, tdc->tdco)) goto err_cancel; if (tdc_const->tdcf_max && nla_put_u32(skb, IFLA_CAN_TDC_TDCF, tdc->tdcf)) goto err_cancel; } nla_nest_end(skb, nest); return 0; err_cancel: nla_nest_cancel(skb, nest); return -EMSGSIZE; } static int can_ctrlmode_ext_fill_info(struct sk_buff *skb, const struct can_priv *priv) { struct nlattr *nest; nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT); if (!nest) return -EMSGSIZE; if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED, priv->ctrlmode_supported)) { nla_nest_cancel(skb, nest); return -EMSGSIZE; } nla_nest_end(skb, nest); return 0; } static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); struct can_ctrlmode cm = {.flags = priv->ctrlmode}; struct can_berr_counter bec = { }; enum can_state state = priv->state; if (priv->do_get_state) priv->do_get_state(dev, &state); if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET && priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN && nla_put(skb, IFLA_CAN_BITTIMING, sizeof(priv->bittiming), &priv->bittiming)) || (priv->bittiming_const && nla_put(skb, IFLA_CAN_BITTIMING_CONST, sizeof(*priv->bittiming_const), priv->bittiming_const)) || nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) || nla_put_u32(skb, IFLA_CAN_STATE, state) || nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) || nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) || (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec) && nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) || (priv->data_bittiming.bitrate && nla_put(skb, IFLA_CAN_DATA_BITTIMING, sizeof(priv->data_bittiming), &priv->data_bittiming)) || (priv->data_bittiming_const && nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST, sizeof(*priv->data_bittiming_const), priv->data_bittiming_const)) || (priv->termination_const && (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) || nla_put(skb, IFLA_CAN_TERMINATION_CONST, sizeof(*priv->termination_const) * priv->termination_const_cnt, priv->termination_const))) || (priv->bitrate_const && nla_put(skb, IFLA_CAN_BITRATE_CONST, sizeof(*priv->bitrate_const) * priv->bitrate_const_cnt, priv->bitrate_const)) || (priv->data_bitrate_const && nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST, sizeof(*priv->data_bitrate_const) * 
priv->data_bitrate_const_cnt, priv->data_bitrate_const)) || (nla_put(skb, IFLA_CAN_BITRATE_MAX, sizeof(priv->bitrate_max), &priv->bitrate_max)) || can_tdc_fill_info(skb, dev) || can_ctrlmode_ext_fill_info(skb, priv) ) return -EMSGSIZE; return 0; } static size_t can_get_xstats_size(const struct net_device *dev) { return sizeof(struct can_device_stats); } static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); if (nla_put(skb, IFLA_INFO_XSTATS, sizeof(priv->can_stats), &priv->can_stats)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int can_newlink(struct net_device *dev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } static void can_dellink(struct net_device *dev, struct list_head *head) { } struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", .netns_refund = true, .maxtype = IFLA_CAN_MAX, .policy = can_policy, .setup = can_setup, .validate = can_validate, .newlink = can_newlink, .changelink = can_changelink, .dellink = can_dellink, .get_size = can_get_size, .fill_info = can_fill_info, .get_xstats_size = can_get_xstats_size, .fill_xstats = can_fill_xstats, }; int can_netlink_register(void) { return rtnl_link_register(&can_link_ops); } void can_netlink_unregister(void) { rtnl_link_unregister(&can_link_ops); }
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-radio-tx.c - radio transmitter support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>

#include "vivid-core.h"
#include "vivid-ctrls.h"
#include "vivid-radio-common.h"
#include "vivid-radio-tx.h"

ssize_t vivid_radio_tx_write(struct file *file, const char __user *buf,
			     size_t size, loff_t *offset)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rds_data *data = dev->rds_gen.data;
	ktime_t timestamp;
	unsigned blk;
	int i;

	if (dev->radio_tx_rds_controls)
		return -EINVAL;

	if (size < sizeof(*data))
		return -EINVAL;
	size = sizeof(*data) * (size / sizeof(*data));

	if (mutex_lock_interruptible(&dev->mutex))
		return -ERESTARTSYS;
	if (dev->radio_tx_rds_owner &&
	    file->private_data != dev->radio_tx_rds_owner) {
		mutex_unlock(&dev->mutex);
		return -EBUSY;
	}
	dev->radio_tx_rds_owner = file->private_data;

retry:
	timestamp = ktime_sub(ktime_get(), dev->radio_rds_init_time);
	blk = ktime_divns(timestamp, VIVID_RDS_NSEC_PER_BLK);
	if (blk - VIVID_RDS_GEN_BLOCKS >= dev->radio_tx_rds_last_block)
		dev->radio_tx_rds_last_block = blk - VIVID_RDS_GEN_BLOCKS + 1;

	/*
	 * No data is available if there hasn't been time to get new data,
	 * or if the RDS receiver has been disabled, or if we use the data
	 * from the RDS transmitter and that RDS transmitter has been disabled,
	 * or if the signal quality is too weak.
	 */
	if (blk == dev->radio_tx_rds_last_block ||
	    !(dev->radio_tx_subchans & V4L2_TUNER_SUB_RDS)) {
		mutex_unlock(&dev->mutex);
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;
		if (msleep_interruptible(20) && signal_pending(current))
			return -EINTR;
		if (mutex_lock_interruptible(&dev->mutex))
			return -ERESTARTSYS;
		goto retry;
	}

	for (i = 0; i < size && blk > dev->radio_tx_rds_last_block;
			dev->radio_tx_rds_last_block++) {
		unsigned data_blk = dev->radio_tx_rds_last_block % VIVID_RDS_GEN_BLOCKS;
		struct v4l2_rds_data rds;

		if (copy_from_user(&rds, buf + i, sizeof(rds))) {
			i = -EFAULT;
			break;
		}
		i += sizeof(rds);
		if (!dev->radio_rds_loop)
			continue;
		if ((rds.block & V4L2_RDS_BLOCK_MSK) == V4L2_RDS_BLOCK_INVALID ||
		    (rds.block & V4L2_RDS_BLOCK_ERROR))
			continue;
		rds.block &= V4L2_RDS_BLOCK_MSK;
		data[data_blk] = rds;
	}
	mutex_unlock(&dev->mutex);
	return i;
}

__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
{
	return EPOLLOUT | EPOLLWRNORM | v4l2_ctrl_poll(file, wait);
}

int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (a->index > 0)
		return -EINVAL;

	strscpy(a->name, "AM/FM/SW Transmitter", sizeof(a->name));
	a->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
			V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_RDS |
			(dev->radio_tx_rds_controls ?
				V4L2_TUNER_CAP_RDS_CONTROLS :
				V4L2_TUNER_CAP_RDS_BLOCK_IO);
	a->rangelow = AM_FREQ_RANGE_LOW;
	a->rangehigh = FM_FREQ_RANGE_HIGH;
	a->txsubchans = dev->radio_tx_subchans;
	return 0;
}

int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (a->index)
		return -EINVAL;
	if (a->txsubchans & ~0x13)
		return -EINVAL;
	dev->radio_tx_subchans = a->txsubchans;
	return 0;
}
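vivid_radio_tx_write() above implements RDS block I/O: each write must be at least one struct v4l2_rds_data and is truncated to a multiple of that size. A hypothetical user-space sketch follows; the /dev/radio1 node name and the assumption that the vivid transmitter is configured for block I/O (radio_tx_rds_controls disabled) are illustrative, not taken from the source.

/* Hypothetical RDS block I/O writer for the vivid transmitter
 * (device node and setup are assumptions).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_rds_data blk[4] = {
		{ .lsb = 0x34, .msb = 0x12, .block = V4L2_RDS_BLOCK_A },
		{ .lsb = 0x00, .msb = 0x04, .block = V4L2_RDS_BLOCK_B },
		{ .lsb = 0xcd, .msb = 0xab, .block = V4L2_RDS_BLOCK_C },
		{ .lsb = 0x41, .msb = 0x42, .block = V4L2_RDS_BLOCK_D },
	};
	int fd = open("/dev/radio1", O_WRONLY);	/* assumed node name */
	ssize_t n;

	if (fd < 0)
		return 1;
	n = write(fd, blk, sizeof(blk));	/* multiple of sizeof(struct v4l2_rds_data) */
	if (n < 0)
		perror("write");
	close(fd);
	return 0;
}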
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPv6 packet mangling table, a port of the IPv4 mangle table to IPv6
 *
 * Copyright (C) 2000-2001 by Harald Welte <laforge@gnumonks.org>
 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
 */
#include <linux/module.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>
#include <net/ipv6.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("ip6tables mangle table");

#define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
			    (1 << NF_INET_LOCAL_IN) | \
			    (1 << NF_INET_FORWARD) | \
			    (1 << NF_INET_LOCAL_OUT) | \
			    (1 << NF_INET_POST_ROUTING))

static const struct xt_table packet_mangler = {
	.name		= "mangle",
	.valid_hooks	= MANGLE_VALID_HOOKS,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV6,
	.priority	= NF_IP6_PRI_MANGLE,
};

static unsigned int
ip6t_mangle_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state)
{
	struct in6_addr saddr, daddr;
	unsigned int ret, verdict;
	u32 flowlabel, mark;
	u8 hop_limit;
	int err;

	/* save source/dest address, mark, hoplimit, flowlabel, priority */
	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
	memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
	mark = skb->mark;
	hop_limit = ipv6_hdr(skb)->hop_limit;

	/* flowlabel and prio (includes version, which shouldn't change either) */
	flowlabel = *((u_int32_t *)ipv6_hdr(skb));

	ret = ip6t_do_table(priv, skb, state);
	verdict = ret & NF_VERDICT_MASK;

	if (verdict != NF_DROP && verdict != NF_STOLEN &&
	    (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
	     !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &daddr) ||
	     skb->mark != mark ||
	     ipv6_hdr(skb)->hop_limit != hop_limit ||
	     flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
		err = ip6_route_me_harder(state->net, state->sk, skb);
		if (err < 0)
			ret = NF_DROP_ERR(err);
	}

	return ret;
}

/* The work comes in here from netfilter.c. */
static unsigned int
ip6table_mangle_hook(void *priv, struct sk_buff *skb,
		     const struct nf_hook_state *state)
{
	if (state->hook == NF_INET_LOCAL_OUT)
		return ip6t_mangle_out(priv, skb, state);
	return ip6t_do_table(priv, skb, state);
}

static struct nf_hook_ops *mangle_ops __read_mostly;

static int ip6table_mangle_table_init(struct net *net)
{
	struct ip6t_replace *repl;
	int ret;

	repl = ip6t_alloc_initial_table(&packet_mangler);
	if (repl == NULL)
		return -ENOMEM;
	ret = ip6t_register_table(net, &packet_mangler, repl, mangle_ops);
	kfree(repl);
	return ret;
}

static void __net_exit ip6table_mangle_net_pre_exit(struct net *net)
{
	ip6t_unregister_table_pre_exit(net, "mangle");
}

static void __net_exit ip6table_mangle_net_exit(struct net *net)
{
	ip6t_unregister_table_exit(net, "mangle");
}

static struct pernet_operations ip6table_mangle_net_ops = {
	.pre_exit = ip6table_mangle_net_pre_exit,
	.exit = ip6table_mangle_net_exit,
};

static int __init ip6table_mangle_init(void)
{
	int ret = xt_register_template(&packet_mangler,
				       ip6table_mangle_table_init);

	if (ret < 0)
		return ret;

	mangle_ops = xt_hook_ops_alloc(&packet_mangler, ip6table_mangle_hook);
	if (IS_ERR(mangle_ops)) {
		xt_unregister_template(&packet_mangler);
		return PTR_ERR(mangle_ops);
	}

	ret = register_pernet_subsys(&ip6table_mangle_net_ops);
	if (ret < 0) {
		xt_unregister_template(&packet_mangler);
		kfree(mangle_ops);
		return ret;
	}

	return ret;
}

static void __exit ip6table_mangle_fini(void)
{
	unregister_pernet_subsys(&ip6table_mangle_net_ops);
	xt_unregister_template(&packet_mangler);
	kfree(mangle_ops);
}

module_init(ip6table_mangle_init);
module_exit(ip6table_mangle_fini);
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/ #include <linux/debugfs.h> #include <linux/etherdevice.h> #include <linux/ethtool_netlink.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <net/netdev_queues.h> #include <net/netdev_rx_queue.h> #include <net/page_pool/helpers.h> #include <net/netlink.h> #include <net/net_shaper.h> #include <net/netdev_lock.h> #include <net/pkt_cls.h> #include <net/rtnetlink.h> #include <net/udp_tunnel.h> #include "netdevsim.h" MODULE_IMPORT_NS("NETDEV_INTERNAL"); #define NSIM_RING_SIZE 256 static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb) { if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) { dev_kfree_skb_any(skb); return NET_RX_DROP; } skb_queue_tail(&rq->skb_queue, skb); return NET_RX_SUCCESS; } static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb, struct nsim_rq *rq) { return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb); } static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); struct net_device *peer_dev; unsigned int len = skb->len; struct netdevsim *peer_ns; struct netdev_config *cfg; struct nsim_rq *rq; int rxq; rcu_read_lock(); if (!nsim_ipsec_tx(ns, skb)) goto out_drop_free; peer_ns = rcu_dereference(ns->peer); if (!peer_ns) goto out_drop_free; peer_dev = peer_ns->netdev; rxq = skb_get_queue_mapping(skb); if (rxq >= peer_dev->num_rx_queues) rxq = rxq % peer_dev->num_rx_queues; rq = peer_ns->rq[rxq]; cfg = peer_dev->cfg; if (skb_is_nonlinear(skb) && (cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED || (cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED && cfg->hds_thresh > len))) skb_linearize(skb); skb_tx_timestamp(skb); if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP)) goto out_drop_cnt; if (!hrtimer_active(&rq->napi_timer)) hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL); rcu_read_unlock(); u64_stats_update_begin(&ns->syncp); ns->tx_packets++; ns->tx_bytes += len; u64_stats_update_end(&ns->syncp); return NETDEV_TX_OK; out_drop_free: dev_kfree_skb(skb); out_drop_cnt: rcu_read_unlock(); u64_stats_update_begin(&ns->syncp); ns->tx_dropped++; u64_stats_update_end(&ns->syncp); return NETDEV_TX_OK; } static void nsim_set_rx_mode(struct net_device *dev) { } static int nsim_change_mtu(struct net_device *dev, int new_mtu) { struct netdevsim *ns = netdev_priv(dev); if (ns->xdp.prog && !ns->xdp.prog->aux->xdp_has_frags && new_mtu > NSIM_XDP_MAX_MTU) return -EBUSY; WRITE_ONCE(dev->mtu, new_mtu); return 0; } static void nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct netdevsim *ns = netdev_priv(dev); unsigned int start; do { start = u64_stats_fetch_begin(&ns->syncp); stats->tx_bytes = ns->tx_bytes; stats->tx_packets = ns->tx_packets; stats->tx_dropped = ns->tx_dropped; } while (u64_stats_fetch_retry(&ns->syncp, start)); } static int nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv); } static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct netdevsim *ns = netdev_priv(dev); struct nsim_dev *nsim_dev = ns->nsim_dev; /* Only refuse multicast addresses, zero address can mean unset/any. 
*/ if (vf >= nsim_dev_get_vfs(nsim_dev) || is_multicast_ether_addr(mac)) return -EINVAL; memcpy(nsim_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN); return 0; } static int nsim_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { struct netdevsim *ns = netdev_priv(dev); struct nsim_dev *nsim_dev = ns->nsim_dev; if (vf >= nsim_dev_get_vfs(nsim_dev) || vlan > 4095 || qos > 7) return -EINVAL; nsim_dev->vfconfigs[vf].vlan = vlan; nsim_dev->vfconfigs[vf].qos = qos; nsim_dev->vfconfigs[vf].vlan_proto = vlan_proto; return 0; } static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max) { struct netdevsim *ns = netdev_priv(dev); struct nsim_dev *nsim_dev = ns->nsim_dev; if (nsim_esw_mode_is_switchdev(ns->nsim_dev)) { pr_err("Not supported in switchdev mode. Please use devlink API.\n"); return -EOPNOTSUPP; } if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; nsim_dev->vfconfigs[vf].min_tx_rate = min; nsim_dev->vfconfigs[vf].max_tx_rate = max; return 0; } static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); struct nsim_dev *nsim_dev = ns->nsim_dev; if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; nsim_dev->vfconfigs[vf].spoofchk_enabled = val; return 0; } static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); struct nsim_dev *nsim_dev = ns->nsim_dev; if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; nsim_dev->vfconfigs[vf].rss_query_enabled = val; return 0; } static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val) { struct netdevsim *ns = netdev_priv(dev); struct nsim_dev *nsim_dev = ns->nsim_dev; if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; nsim_dev->vfconfigs[vf].trusted = val; return 0; } static int nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi) { struct netdevsim *ns = netdev_priv(dev); struct nsim_dev *nsim_dev = ns->nsim_dev; if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; ivi->vf = vf; ivi->linkstate = nsim_dev->vfconfigs[vf].link_state; ivi->min_tx_rate = nsim_dev->vfconfigs[vf].min_tx_rate; ivi->max_tx_rate = nsim_dev->vfconfigs[vf].max_tx_rate; ivi->vlan = nsim_dev->vfconfigs[vf].vlan; ivi->vlan_proto = nsim_dev->vfconfigs[vf].vlan_proto; ivi->qos = nsim_dev->vfconfigs[vf].qos; memcpy(&ivi->mac, nsim_dev->vfconfigs[vf].vf_mac, ETH_ALEN); ivi->spoofchk = nsim_dev->vfconfigs[vf].spoofchk_enabled; ivi->trusted = nsim_dev->vfconfigs[vf].trusted; ivi->rss_query_en = nsim_dev->vfconfigs[vf].rss_query_enabled; return 0; } static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state) { struct netdevsim *ns = netdev_priv(dev); struct nsim_dev *nsim_dev = ns->nsim_dev; if (vf >= nsim_dev_get_vfs(nsim_dev)) return -EINVAL; switch (state) { case IFLA_VF_LINK_STATE_AUTO: case IFLA_VF_LINK_STATE_ENABLE: case IFLA_VF_LINK_STATE_DISABLE: break; default: return -EINVAL; } nsim_dev->vfconfigs[vf].link_state = state; return 0; } static void nsim_taprio_stats(struct tc_taprio_qopt_stats *stats) { stats->window_drops = 0; stats->tx_overruns = 0; } static int nsim_setup_tc_taprio(struct net_device *dev, struct tc_taprio_qopt_offload *offload) { int err = 0; switch (offload->cmd) { case TAPRIO_CMD_REPLACE: case TAPRIO_CMD_DESTROY: break; case TAPRIO_CMD_STATS: nsim_taprio_stats(&offload->stats); break; default: err = -EOPNOTSUPP; } return err; } static LIST_HEAD(nsim_block_cb_list); static int nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, 
void *type_data) { struct netdevsim *ns = netdev_priv(dev); switch (type) { case TC_SETUP_QDISC_TAPRIO: return nsim_setup_tc_taprio(dev, type_data); case TC_SETUP_BLOCK: return flow_block_cb_setup_simple(type_data, &nsim_block_cb_list, nsim_setup_tc_block_cb, ns, ns, true); default: return -EOPNOTSUPP; } } static int nsim_set_features(struct net_device *dev, netdev_features_t features) { struct netdevsim *ns = netdev_priv(dev); if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC)) return nsim_bpf_disable_tc(ns); return 0; } static int nsim_get_iflink(const struct net_device *dev) { struct netdevsim *nsim, *peer; int iflink; nsim = netdev_priv(dev); rcu_read_lock(); peer = rcu_dereference(nsim->peer); iflink = peer ? READ_ONCE(peer->netdev->ifindex) : READ_ONCE(dev->ifindex); rcu_read_unlock(); return iflink; } static int nsim_rcv(struct nsim_rq *rq, int budget) { struct sk_buff *skb; int i; for (i = 0; i < budget; i++) { if (skb_queue_empty(&rq->skb_queue)) break; skb = skb_dequeue(&rq->skb_queue); netif_receive_skb(skb); } return i; } static int nsim_poll(struct napi_struct *napi, int budget) { struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi); int done; done = nsim_rcv(rq, budget); napi_complete(napi); return done; } static int nsim_create_page_pool(struct page_pool **p, struct napi_struct *napi) { struct page_pool_params params = { .order = 0, .pool_size = NSIM_RING_SIZE, .nid = NUMA_NO_NODE, .dev = &napi->dev->dev, .napi = napi, .dma_dir = DMA_BIDIRECTIONAL, .netdev = napi->dev, }; struct page_pool *pool; pool = page_pool_create(&params); if (IS_ERR(pool)) return PTR_ERR(pool); *p = pool; return 0; } static int nsim_init_napi(struct netdevsim *ns) { struct net_device *dev = ns->netdev; struct nsim_rq *rq; int err, i; for (i = 0; i < dev->num_rx_queues; i++) { rq = ns->rq[i]; netif_napi_add_config_locked(dev, &rq->napi, nsim_poll, i); } for (i = 0; i < dev->num_rx_queues; i++) { rq = ns->rq[i]; err = nsim_create_page_pool(&rq->page_pool, &rq->napi); if (err) goto err_pp_destroy; } return 0; err_pp_destroy: while (i--) { page_pool_destroy(ns->rq[i]->page_pool); ns->rq[i]->page_pool = NULL; } for (i = 0; i < dev->num_rx_queues; i++) __netif_napi_del_locked(&ns->rq[i]->napi); return err; } static enum hrtimer_restart nsim_napi_schedule(struct hrtimer *timer) { struct nsim_rq *rq; rq = container_of(timer, struct nsim_rq, napi_timer); napi_schedule(&rq->napi); return HRTIMER_NORESTART; } static void nsim_rq_timer_init(struct nsim_rq *rq) { hrtimer_setup(&rq->napi_timer, nsim_napi_schedule, CLOCK_MONOTONIC, HRTIMER_MODE_REL); } static void nsim_enable_napi(struct netdevsim *ns) { struct net_device *dev = ns->netdev; int i; for (i = 0; i < dev->num_rx_queues; i++) { struct nsim_rq *rq = ns->rq[i]; netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi); napi_enable_locked(&rq->napi); } } static int nsim_open(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); int err; netdev_assert_locked(dev); err = nsim_init_napi(ns); if (err) return err; nsim_enable_napi(ns); return 0; } static void nsim_del_napi(struct netdevsim *ns) { struct net_device *dev = ns->netdev; int i; for (i = 0; i < dev->num_rx_queues; i++) { struct nsim_rq *rq = ns->rq[i]; napi_disable_locked(&rq->napi); __netif_napi_del_locked(&rq->napi); } synchronize_net(); for (i = 0; i < dev->num_rx_queues; i++) { page_pool_destroy(ns->rq[i]->page_pool); ns->rq[i]->page_pool = NULL; } } static int nsim_stop(struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); struct netdevsim 
*peer; netdev_assert_locked(dev); netif_carrier_off(dev); peer = rtnl_dereference(ns->peer); if (peer) netif_carrier_off(peer->netdev); nsim_del_napi(ns); return 0; } static int nsim_shaper_set(struct net_shaper_binding *binding, const struct net_shaper *shaper, struct netlink_ext_ack *extack) { return 0; } static int nsim_shaper_del(struct net_shaper_binding *binding, const struct net_shaper_handle *handle, struct netlink_ext_ack *extack) { return 0; } static int nsim_shaper_group(struct net_shaper_binding *binding, int leaves_count, const struct net_shaper *leaves, const struct net_shaper *root, struct netlink_ext_ack *extack) { return 0; } static void nsim_shaper_cap(struct net_shaper_binding *binding, enum net_shaper_scope scope, unsigned long *flags) { *flags = ULONG_MAX; } static const struct net_shaper_ops nsim_shaper_ops = { .set = nsim_shaper_set, .delete = nsim_shaper_del, .group = nsim_shaper_group, .capabilities = nsim_shaper_cap, }; static const struct net_device_ops nsim_netdev_ops = { .ndo_start_xmit = nsim_start_xmit, .ndo_set_rx_mode = nsim_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = nsim_change_mtu, .ndo_get_stats64 = nsim_get_stats64, .ndo_set_vf_mac = nsim_set_vf_mac, .ndo_set_vf_vlan = nsim_set_vf_vlan, .ndo_set_vf_rate = nsim_set_vf_rate, .ndo_set_vf_spoofchk = nsim_set_vf_spoofchk, .ndo_set_vf_trust = nsim_set_vf_trust, .ndo_get_vf_config = nsim_get_vf_config, .ndo_set_vf_link_state = nsim_set_vf_link_state, .ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en, .ndo_setup_tc = nsim_setup_tc, .ndo_set_features = nsim_set_features, .ndo_get_iflink = nsim_get_iflink, .ndo_bpf = nsim_bpf, .ndo_open = nsim_open, .ndo_stop = nsim_stop, .net_shaper_ops = &nsim_shaper_ops, }; static const struct net_device_ops nsim_vf_netdev_ops = { .ndo_start_xmit = nsim_start_xmit, .ndo_set_rx_mode = nsim_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = nsim_change_mtu, .ndo_get_stats64 = nsim_get_stats64, .ndo_setup_tc = nsim_setup_tc, .ndo_set_features = nsim_set_features, }; /* We don't have true per-queue stats, yet, so do some random fakery here. * Only report stuff for queue 0. 
*/ static void nsim_get_queue_stats_rx(struct net_device *dev, int idx, struct netdev_queue_stats_rx *stats) { struct rtnl_link_stats64 rtstats = {}; if (!idx) nsim_get_stats64(dev, &rtstats); stats->packets = rtstats.rx_packets - !!rtstats.rx_packets; stats->bytes = rtstats.rx_bytes; } static void nsim_get_queue_stats_tx(struct net_device *dev, int idx, struct netdev_queue_stats_tx *stats) { struct rtnl_link_stats64 rtstats = {}; if (!idx) nsim_get_stats64(dev, &rtstats); stats->packets = rtstats.tx_packets - !!rtstats.tx_packets; stats->bytes = rtstats.tx_bytes; } static void nsim_get_base_stats(struct net_device *dev, struct netdev_queue_stats_rx *rx, struct netdev_queue_stats_tx *tx) { struct rtnl_link_stats64 rtstats = {}; nsim_get_stats64(dev, &rtstats); rx->packets = !!rtstats.rx_packets; rx->bytes = 0; tx->packets = !!rtstats.tx_packets; tx->bytes = 0; } static const struct netdev_stat_ops nsim_stat_ops = { .get_queue_stats_tx = nsim_get_queue_stats_tx, .get_queue_stats_rx = nsim_get_queue_stats_rx, .get_base_stats = nsim_get_base_stats, }; static struct nsim_rq *nsim_queue_alloc(void) { struct nsim_rq *rq; rq = kzalloc(sizeof(*rq), GFP_KERNEL_ACCOUNT); if (!rq) return NULL; skb_queue_head_init(&rq->skb_queue); nsim_rq_timer_init(rq); return rq; } static void nsim_queue_free(struct nsim_rq *rq) { hrtimer_cancel(&rq->napi_timer); skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE); kfree(rq); } /* Queue reset mode is controlled by ns->rq_reset_mode. * - normal - new NAPI new pool (old NAPI enabled when new added) * - mode 1 - allocate new pool (NAPI is only disabled / enabled) * - mode 2 - new NAPI new pool (old NAPI removed before new added) * - mode 3 - new NAPI new pool (old NAPI disabled when new added) */ struct nsim_queue_mem { struct nsim_rq *rq; struct page_pool *pp; }; static int nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx) { struct nsim_queue_mem *qmem = per_queue_mem; struct netdevsim *ns = netdev_priv(dev); int err; if (ns->rq_reset_mode > 3) return -EINVAL; if (ns->rq_reset_mode == 1) { if (!netif_running(ns->netdev)) return -ENETDOWN; return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi); } qmem->rq = nsim_queue_alloc(); if (!qmem->rq) return -ENOMEM; err = nsim_create_page_pool(&qmem->rq->page_pool, &qmem->rq->napi); if (err) goto err_free; if (!ns->rq_reset_mode) netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll, idx); return 0; err_free: nsim_queue_free(qmem->rq); return err; } static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem) { struct nsim_queue_mem *qmem = per_queue_mem; struct netdevsim *ns = netdev_priv(dev); page_pool_destroy(qmem->pp); if (qmem->rq) { if (!ns->rq_reset_mode) netif_napi_del_locked(&qmem->rq->napi); page_pool_destroy(qmem->rq->page_pool); nsim_queue_free(qmem->rq); } } static int nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx) { struct nsim_queue_mem *qmem = per_queue_mem; struct netdevsim *ns = netdev_priv(dev); netdev_assert_locked(dev); if (ns->rq_reset_mode == 1) { ns->rq[idx]->page_pool = qmem->pp; napi_enable_locked(&ns->rq[idx]->napi); return 0; } /* netif_napi_add()/_del() should normally be called from alloc/free, * here we want to test various call orders. 
*/ if (ns->rq_reset_mode == 2) { netif_napi_del_locked(&ns->rq[idx]->napi); netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll, idx); } else if (ns->rq_reset_mode == 3) { netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll, idx); netif_napi_del_locked(&ns->rq[idx]->napi); } ns->rq[idx] = qmem->rq; napi_enable_locked(&ns->rq[idx]->napi); return 0; } static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx) { struct nsim_queue_mem *qmem = per_queue_mem; struct netdevsim *ns = netdev_priv(dev); netdev_assert_locked(dev); napi_disable_locked(&ns->rq[idx]->napi); if (ns->rq_reset_mode == 1) { qmem->pp = ns->rq[idx]->page_pool; page_pool_disable_direct_recycling(qmem->pp); } else { qmem->rq = ns->rq[idx]; } return 0; } static const struct netdev_queue_mgmt_ops nsim_queue_mgmt_ops = { .ndo_queue_mem_size = sizeof(struct nsim_queue_mem), .ndo_queue_mem_alloc = nsim_queue_mem_alloc, .ndo_queue_mem_free = nsim_queue_mem_free, .ndo_queue_start = nsim_queue_start, .ndo_queue_stop = nsim_queue_stop, }; static ssize_t nsim_qreset_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct netdevsim *ns = file->private_data; unsigned int queue, mode; char buf[32]; ssize_t ret; if (count >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, data, count)) return -EFAULT; buf[count] = '\0'; ret = sscanf(buf, "%u %u", &queue, &mode); if (ret != 2) return -EINVAL; netdev_lock(ns->netdev); if (queue >= ns->netdev->real_num_rx_queues) { ret = -EINVAL; goto exit_unlock; } ns->rq_reset_mode = mode; ret = netdev_rx_queue_restart(ns->netdev, queue); ns->rq_reset_mode = 0; if (ret) goto exit_unlock; ret = count; exit_unlock: netdev_unlock(ns->netdev); return ret; } static const struct file_operations nsim_qreset_fops = { .open = simple_open, .write = nsim_qreset_write, .owner = THIS_MODULE, }; static ssize_t nsim_pp_hold_read(struct file *file, char __user *data, size_t count, loff_t *ppos) { struct netdevsim *ns = file->private_data; char buf[3] = "n\n"; if (ns->page) buf[0] = 'y'; return simple_read_from_buffer(data, count, ppos, buf, 2); } static ssize_t nsim_pp_hold_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct netdevsim *ns = file->private_data; ssize_t ret; bool val; ret = kstrtobool_from_user(data, count, &val); if (ret) return ret; rtnl_lock(); ret = count; if (val == !!ns->page) goto exit; if (!netif_running(ns->netdev) && val) { ret = -ENETDOWN; } else if (val) { ns->page = page_pool_dev_alloc_pages(ns->rq[0]->page_pool); if (!ns->page) ret = -ENOMEM; } else { page_pool_put_full_page(ns->page->pp, ns->page, false); ns->page = NULL; } exit: rtnl_unlock(); return ret; } static const struct file_operations nsim_pp_hold_fops = { .open = simple_open, .read = nsim_pp_hold_read, .write = nsim_pp_hold_write, .llseek = generic_file_llseek, .owner = THIS_MODULE, }; static void nsim_setup(struct net_device *dev) { ether_setup(dev); eth_hw_addr_random(dev); dev->tx_queue_len = 0; dev->flags &= ~IFF_MULTICAST; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->features |= NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | NETIF_F_TSO; dev->hw_features |= NETIF_F_HW_TC | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | NETIF_F_TSO; dev->max_mtu = ETH_MAX_MTU; dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD; } static int nsim_queue_init(struct netdevsim *ns) { struct net_device *dev = ns->netdev; int i; ns->rq = kcalloc(dev->num_rx_queues, sizeof(*ns->rq), GFP_KERNEL_ACCOUNT); if 
(!ns->rq) return -ENOMEM; for (i = 0; i < dev->num_rx_queues; i++) { ns->rq[i] = nsim_queue_alloc(); if (!ns->rq[i]) goto err_free_prev; } return 0; err_free_prev: while (i--) kfree(ns->rq[i]); kfree(ns->rq); return -ENOMEM; } static void nsim_queue_uninit(struct netdevsim *ns) { struct net_device *dev = ns->netdev; int i; for (i = 0; i < dev->num_rx_queues; i++) nsim_queue_free(ns->rq[i]); kfree(ns->rq); ns->rq = NULL; } static int nsim_init_netdevsim(struct netdevsim *ns) { struct mock_phc *phc; int err; phc = mock_phc_create(&ns->nsim_bus_dev->dev); if (IS_ERR(phc)) return PTR_ERR(phc); ns->phc = phc; ns->netdev->netdev_ops = &nsim_netdev_ops; ns->netdev->stat_ops = &nsim_stat_ops; ns->netdev->queue_mgmt_ops = &nsim_queue_mgmt_ops; netdev_lockdep_set_classes(ns->netdev); err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev); if (err) goto err_phc_destroy; rtnl_lock(); err = nsim_queue_init(ns); if (err) goto err_utn_destroy; err = nsim_bpf_init(ns); if (err) goto err_rq_destroy; nsim_macsec_init(ns); nsim_ipsec_init(ns); err = register_netdevice(ns->netdev); if (err) goto err_ipsec_teardown; rtnl_unlock(); if (IS_ENABLED(CONFIG_DEBUG_NET)) { ns->nb.notifier_call = netdev_debug_event; if (register_netdevice_notifier_dev_net(ns->netdev, &ns->nb, &ns->nn)) ns->nb.notifier_call = NULL; } return 0; err_ipsec_teardown: nsim_ipsec_teardown(ns); nsim_macsec_teardown(ns); nsim_bpf_uninit(ns); err_rq_destroy: nsim_queue_uninit(ns); err_utn_destroy: rtnl_unlock(); nsim_udp_tunnels_info_destroy(ns->netdev); err_phc_destroy: mock_phc_destroy(ns->phc); return err; } static int nsim_init_netdevsim_vf(struct netdevsim *ns) { int err; ns->netdev->netdev_ops = &nsim_vf_netdev_ops; rtnl_lock(); err = register_netdevice(ns->netdev); rtnl_unlock(); return err; } static void nsim_exit_netdevsim(struct netdevsim *ns) { nsim_udp_tunnels_info_destroy(ns->netdev); mock_phc_destroy(ns->phc); } struct netdevsim * nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) { struct net_device *dev; struct netdevsim *ns; int err; dev = alloc_netdev_mq(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup, nsim_dev->nsim_bus_dev->num_queues); if (!dev) return ERR_PTR(-ENOMEM); dev_net_set(dev, nsim_dev_net(nsim_dev)); ns = netdev_priv(dev); ns->netdev = dev; u64_stats_init(&ns->syncp); ns->nsim_dev = nsim_dev; ns->nsim_dev_port = nsim_dev_port; ns->nsim_bus_dev = nsim_dev->nsim_bus_dev; SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev); SET_NETDEV_DEVLINK_PORT(dev, &nsim_dev_port->devlink_port); nsim_ethtool_init(ns); if (nsim_dev_port_is_pf(nsim_dev_port)) err = nsim_init_netdevsim(ns); else err = nsim_init_netdevsim_vf(ns); if (err) goto err_free_netdev; ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir, ns, &nsim_pp_hold_fops); ns->qr_dfs = debugfs_create_file("queue_reset", 0200, nsim_dev_port->ddir, ns, &nsim_qreset_fops); return ns; err_free_netdev: free_netdev(dev); return ERR_PTR(err); } void nsim_destroy(struct netdevsim *ns) { struct net_device *dev = ns->netdev; struct netdevsim *peer; debugfs_remove(ns->qr_dfs); debugfs_remove(ns->pp_dfs); if (ns->nb.notifier_call) unregister_netdevice_notifier_dev_net(ns->netdev, &ns->nb, &ns->nn); rtnl_lock(); peer = rtnl_dereference(ns->peer); if (peer) RCU_INIT_POINTER(peer->peer, NULL); RCU_INIT_POINTER(ns->peer, NULL); unregister_netdevice(dev); if (nsim_dev_port_is_pf(ns->nsim_dev_port)) { nsim_macsec_teardown(ns); nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); nsim_queue_uninit(ns); } rtnl_unlock(); if 
(nsim_dev_port_is_pf(ns->nsim_dev_port)) nsim_exit_netdevsim(ns); /* Put this intentionally late to exercise the orphaning path */ if (ns->page) { page_pool_put_full_page(ns->page->pp, ns->page, false); ns->page = NULL; } free_netdev(dev); } bool netdev_is_nsim(struct net_device *dev) { return dev->netdev_ops == &nsim_netdev_ops; } static int nsim_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { NL_SET_ERR_MSG_MOD(extack, "Please use: echo \"[ID] [PORT_COUNT] [NUM_QUEUES]\" > /sys/bus/netdevsim/new_device"); return -EOPNOTSUPP; } static struct rtnl_link_ops nsim_link_ops __read_mostly = { .kind = DRV_NAME, .validate = nsim_validate, }; static int __init nsim_module_init(void) { int err; err = nsim_dev_init(); if (err) return err; err = nsim_bus_init(); if (err) goto err_dev_exit; err = rtnl_link_register(&nsim_link_ops); if (err) goto err_bus_exit; return 0; err_bus_exit: nsim_bus_exit(); err_dev_exit: nsim_dev_exit(); return err; } static void __exit nsim_module_exit(void) { rtnl_link_unregister(&nsim_link_ops); nsim_bus_exit(); nsim_dev_exit(); } module_init(nsim_module_init); module_exit(nsim_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Simulated networking device for testing"); MODULE_ALIAS_RTNL_LINK(DRV_NAME);
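The "queue_reset" debugfs file created above accepts a queue index and a reset mode written as two decimal numbers ("<queue> <mode>"), which nsim_qreset_write() parses with sscanf() before calling netdev_rx_queue_restart(). A minimal userspace sketch of driving it follows; the debugfs path used here is only an assumed example, since the real one depends on how the netdevsim device and port were instantiated.

/*
 * Sketch: write "<queue> <mode>" into netdevsim's queue_reset debugfs knob.
 * The path below is an assumption for device 0, port 0 - adjust as needed.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/netdevsim/netdevsim0/ports/0/queue_reset";
	const char *cmd = "0 2\n";	/* restart RX queue 0 using reset mode 2 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}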
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-mul_3.c  -  MPI helper functions
 * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */

#include "mpi-internal.h"
#include "longlong.h"

mpi_limb_t
mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
		 mpi_size_t s1_size, mpi_limb_t s2_limb)
{
	mpi_limb_t cy_limb;
	mpi_size_t j;
	mpi_limb_t prod_high, prod_low;
	mpi_limb_t x;

	/* The loop counter and index J goes from -SIZE to -1.  This way
	 * the loop becomes faster.  */
	j = -s1_size;
	res_ptr -= j;
	s1_ptr -= j;

	cy_limb = 0;
	do {
		umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);

		prod_low += cy_limb;
		cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;

		x = res_ptr[j];
		prod_low = x - prod_low;
		cy_limb += prod_low > x ? 1 : 0;
		res_ptr[j] = prod_low;
	} while (++j);

	return cy_limb;
}
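mpihelp_submul_1() computes res_ptr[] -= s1_ptr[] * s2_limb one limb at a time, folding the high half of each double-width product and the borrow from the previous limb's subtraction into a single carry word that is returned for the caller to propagate. Below is a standalone sketch of the same limb arithmetic on uint64_t, using the compiler's __uint128_t in place of umul_ppmm(); the function name and the tiny two-limb test in main() are illustrative only, not the kernel's MPI types.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t submul_1(uint64_t *res, const uint64_t *s1, int n, uint64_t m)
{
	uint64_t borrow = 0;

	for (int i = 0; i < n; i++) {
		__uint128_t prod = (__uint128_t)s1[i] * m; /* double-width product */
		uint64_t lo = (uint64_t)prod + borrow;
		/* carry out of adding the previous borrow into the low half */
		uint64_t hi = (uint64_t)(prod >> 64) + (lo < (uint64_t)prod);
		uint64_t d = res[i] - lo;

		hi += d > res[i];	/* borrow out of the limb subtraction */
		res[i] = d;
		borrow = hi;
	}
	return borrow;	/* would have to be subtracted from the next limb up */
}

int main(void)
{
	uint64_t res[2] = { 10, 1 };		/* 2^64 + 10, little-endian limbs */
	const uint64_t s1[2] = { 3, 0 };	/* 3 */

	uint64_t b = submul_1(res, s1, 2, 4);	/* res -= 3 * 4 */
	printf("res = { %" PRIu64 ", %" PRIu64 " }, borrow = %" PRIu64 "\n",
	       res[0], res[1], b);	/* expect { 2^64 - 2, 0 }, borrow 0 */
	return 0;
}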
4 4 1 3 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 // SPDX-License-Identifier: GPL-2.0-or-later /* * connector.c * * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> * All rights reserved. */ #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/list.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <linux/moduleparam.h> #include <linux/connector.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <net/sock.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR); static struct cn_dev cdev; static int cn_already_initialized; /* * Sends mult (multiple) cn_msg at a time. * * msg->seq and msg->ack are used to determine message genealogy. * When someone sends message it puts there locally unique sequence * and random acknowledge numbers. Sequence number may be copied into * nlmsghdr->nlmsg_seq too. * * Sequence number is incremented with each message to be sent. * * If we expect a reply to our message then the sequence number in * received message MUST be the same as in original message, and * acknowledge number MUST be the same + 1. * * If we receive a message and its sequence number is not equal to the * one we are expecting then it is a new message. * * If we receive a message and its sequence number is the same as one * we are expecting but it's acknowledgement number is not equal to * the acknowledgement number in the original message + 1, then it is * a new message. * * If msg->len != len, then additional cn_msg messages are expected following * the first msg. * * The message is sent to, the portid if given, the group if given, both if * both, or if both are zero then the group is looked up and sent there. 
*/ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group, gfp_t gfp_mask, netlink_filter_fn filter, void *filter_data) { struct cn_callback_entry *__cbq; unsigned int size; struct sk_buff *skb; struct nlmsghdr *nlh; struct cn_msg *data; struct cn_dev *dev = &cdev; u32 group = 0; int found = 0; if (portid || __group) { group = __group; } else { spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { if (cn_cb_equal(&__cbq->id.id, &msg->id)) { found = 1; group = __cbq->group; break; } } spin_unlock_bh(&dev->cbdev->queue_lock); if (!found) return -ENODEV; } if (!portid && !netlink_has_listeners(dev->nls, group)) return -ESRCH; size = sizeof(*msg) + len; skb = nlmsg_new(size, gfp_mask); if (!skb) return -ENOMEM; nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0); if (!nlh) { kfree_skb(skb); return -EMSGSIZE; } data = nlmsg_data(nlh); memcpy(data, msg, size); NETLINK_CB(skb).dst_group = group; if (group) return netlink_broadcast_filtered(dev->nls, skb, portid, group, gfp_mask, filter, (void *)filter_data); return netlink_unicast(dev->nls, skb, portid, !gfpflags_allow_blocking(gfp_mask)); } EXPORT_SYMBOL_GPL(cn_netlink_send_mult); /* same as cn_netlink_send_mult except msg->len is used for len */ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, gfp_t gfp_mask) { return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask, NULL, NULL); } EXPORT_SYMBOL_GPL(cn_netlink_send); /* * Callback helper - queues work and setup destructor for given data. */ static int cn_call_callback(struct sk_buff *skb) { struct nlmsghdr *nlh; struct cn_callback_entry *i, *cbq = NULL; struct cn_dev *dev = &cdev; struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb)); struct netlink_skb_parms *nsp = &NETLINK_CB(skb); int err = -ENODEV; /* verify msg->len is within skb */ nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len) return -EINVAL; spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) { if (cn_cb_equal(&i->id.id, &msg->id)) { refcount_inc(&i->refcnt); cbq = i; break; } } spin_unlock_bh(&dev->cbdev->queue_lock); if (cbq != NULL) { cbq->callback(msg, nsp); kfree_skb(skb); cn_queue_release_callback(cbq); err = 0; } return err; } /* * Allow non-root access for NETLINK_CONNECTOR family having CN_IDX_PROC * multicast group. */ static int cn_bind(struct net *net, int group) { unsigned long groups = (unsigned long) group; if (ns_capable(net->user_ns, CAP_NET_ADMIN)) return 0; if (test_bit(CN_IDX_PROC - 1, &groups)) return 0; return -EPERM; } static void cn_release(struct sock *sk, unsigned long *groups) { if (groups && test_bit(CN_IDX_PROC - 1, groups)) { kfree(sk->sk_user_data); sk->sk_user_data = NULL; } } /* * Main netlink receiving function. * * It checks skb, netlink header and msg sizes, and calls callback helper. */ static void cn_rx_skb(struct sk_buff *skb) { struct nlmsghdr *nlh; int len, err; if (skb->len >= NLMSG_HDRLEN) { nlh = nlmsg_hdr(skb); len = nlmsg_len(nlh); if (len < (int)sizeof(struct cn_msg) || skb->len < nlh->nlmsg_len || len > CONNECTOR_MAX_MSG_SIZE) return; err = cn_call_callback(skb_get(skb)); if (err < 0) kfree_skb(skb); } } /* * Callback add routing - adds callback with given ID and name. * If there is registered callback with the same ID it will not be added. * * May sleep. 
*/ int cn_add_callback(const struct cb_id *id, const char *name, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) { struct cn_dev *dev = &cdev; if (!cn_already_initialized) return -EAGAIN; return cn_queue_add_callback(dev->cbdev, name, id, callback); } EXPORT_SYMBOL_GPL(cn_add_callback); /* * Callback remove routing - removes callback * with given ID. * If there is no registered callback with given * ID nothing happens. * * May sleep while waiting for reference counter to become zero. */ void cn_del_callback(const struct cb_id *id) { struct cn_dev *dev = &cdev; cn_queue_del_callback(dev->cbdev, id); } EXPORT_SYMBOL_GPL(cn_del_callback); static int __maybe_unused cn_proc_show(struct seq_file *m, void *v) { struct cn_queue_dev *dev = cdev.cbdev; struct cn_callback_entry *cbq; seq_printf(m, "Name ID\n"); spin_lock_bh(&dev->queue_lock); list_for_each_entry(cbq, &dev->queue_list, callback_entry) { seq_printf(m, "%-15s %u:%u\n", cbq->id.name, cbq->id.id.idx, cbq->id.id.val); } spin_unlock_bh(&dev->queue_lock); return 0; } static int cn_init(void) { struct cn_dev *dev = &cdev; struct netlink_kernel_cfg cfg = { .groups = CN_NETLINK_USERS + 0xf, .input = cn_rx_skb, .flags = NL_CFG_F_NONROOT_RECV, .bind = cn_bind, .release = cn_release, }; dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg); if (!dev->nls) return -EIO; dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls); if (!dev->cbdev) { netlink_kernel_release(dev->nls); return -EINVAL; } cn_already_initialized = 1; proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show); return 0; } static void cn_fini(void) { struct cn_dev *dev = &cdev; cn_already_initialized = 0; remove_proc_entry("connector", init_net.proc_net); cn_queue_free_dev(dev->cbdev); netlink_kernel_release(dev->nls); } subsys_initcall(cn_init); module_exit(cn_fini);
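The exported entry points above (cn_add_callback(), cn_del_callback(), cn_netlink_send()) are all a connector client needs. Here is a minimal module sketch that registers a callback and echoes each received message back, following the seq/ack reply convention described in the header comment; the cb_id values and the module name are made up for illustration, and passing both portid and group as zero lets cn_netlink_send_mult() look the group up from the registered callback.

/* Sketch of a connector client; demo_id values are purely illustrative. */
#include <linux/module.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static const struct cb_id demo_id = { .idx = 0x123, .val = 0x456 };

/* Echo every received message back as a reply. */
static void demo_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	struct cn_msg *reply;

	reply = kzalloc(sizeof(*reply) + msg->len, GFP_KERNEL);
	if (!reply)
		return;

	reply->id = msg->id;
	reply->seq = msg->seq;		/* same sequence number ...            */
	reply->ack = msg->ack + 1;	/* ... acknowledge number incremented  */
	reply->len = msg->len;
	memcpy(reply->data, msg->data, msg->len);

	/* portid == 0 and group == 0: group is looked up from demo_id. */
	cn_netlink_send(reply, 0, 0, GFP_KERNEL);
	kfree(reply);
}

static int __init demo_init(void)
{
	return cn_add_callback(&demo_id, "cn_demo", demo_callback);
}

static void __exit demo_exit(void)
{
	cn_del_callback(&demo_id);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Connector client sketch");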
8114 8121 8124 14162 13240 16192 4132 5173 5184 21 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 /* SPDX-License-Identifier: GPL-2.0+ */ /* * Sleepable Read-Copy Update mechanism for mutual exclusion * * Copyright (C) IBM Corporation, 2006 * Copyright (C) Fujitsu, 2012 * * Author: Paul McKenney <paulmck@linux.ibm.com> * Lai Jiangshan <laijs@cn.fujitsu.com> * * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU/ *.txt * */ #ifndef _LINUX_SRCU_H #define _LINUX_SRCU_H #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> #include <linux/rcu_segcblist.h> struct srcu_struct; #ifdef CONFIG_DEBUG_LOCK_ALLOC int __init_srcu_struct(struct srcu_struct *ssp, const char *name, struct lock_class_key *key); #define init_srcu_struct(ssp) \ ({ \ static struct lock_class_key __srcu_key; \ \ __init_srcu_struct((ssp), #ssp, &__srcu_key); \ }) #define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ int init_srcu_struct(struct srcu_struct *ssp); #define __SRCU_DEP_MAP_INIT(srcu_name) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */ #define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock(). #define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe(). #define SRCU_READ_FLAVOR_LITE 0x4 // srcu_read_lock_lite(). #define SRCU_READ_FLAVOR_FAST 0x8 // srcu_read_lock_fast(). 
#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \ SRCU_READ_FLAVOR_LITE | SRCU_READ_FLAVOR_FAST) // All of the above. #define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_LITE | SRCU_READ_FLAVOR_FAST) // Flavors requiring synchronize_rcu() // instead of smp_mb(). void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); #ifdef CONFIG_TINY_SRCU #include <linux/srcutiny.h> #elif defined(CONFIG_TREE_SRCU) #include <linux/srcutree.h> #else #error "Unknown SRCU implementation specified to kernel configuration" #endif void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void (*func)(struct rcu_head *head)); void cleanup_srcu_struct(struct srcu_struct *ssp); void synchronize_srcu(struct srcu_struct *ssp); #define SRCU_GET_STATE_COMPLETED 0x1 /** * get_completed_synchronize_srcu - Return a pre-completed polled state cookie * * Returns a value that poll_state_synchronize_srcu() will always treat * as a cookie whose grace period has already completed. */ static inline unsigned long get_completed_synchronize_srcu(void) { return SRCU_GET_STATE_COMPLETED; } unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); // Maximum number of unsigned long values corresponding to // not-yet-completed SRCU grace periods. #define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2 /** * same_state_synchronize_srcu - Are two old-state values identical? * @oldstate1: First old-state value. * @oldstate2: Second old-state value. * * The two old-state values must have been obtained from either * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or * get_completed_synchronize_srcu(). Returns @true if the two values are * identical and @false otherwise. This allows structures whose lifetimes * are tracked by old-state values to push these values to a list header, * allowing those structures to be slightly smaller. */ static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2) { return oldstate1 == oldstate2; } #ifdef CONFIG_NEED_SRCU_NMI_SAFE int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp); #else static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) { return __srcu_read_lock(ssp); } static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) { __srcu_read_unlock(ssp, idx); } #endif /* CONFIG_NEED_SRCU_NMI_SAFE */ void srcu_init(void); #ifdef CONFIG_DEBUG_LOCK_ALLOC /** * srcu_read_lock_held - might we be in SRCU read-side critical section? * @ssp: The srcu_struct structure to check * * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, * this assumes we are in an SRCU read-side critical section unless it can * prove otherwise. * * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot * and while lockdep is disabled. * * Note that SRCU is based on its own statemachine and it doesn't * relies on normal RCU, it can be called from the CPU which * is in the idle loop from an RCU point of view or offline. */ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) { if (!debug_lockdep_rcu_enabled()) return 1; return lock_is_held(&ssp->dep_map); } /* * Annotations provide deadlock detection for SRCU. 
* * Similar to other lockdep annotations, except there is an additional * srcu_lock_sync(), which is basically an empty *write*-side critical section, * see lock_sync() for more information. */ /* Annotates a srcu_read_lock() */ static inline void srcu_lock_acquire(struct lockdep_map *map) { lock_map_acquire_read(map); } /* Annotates a srcu_read_lock() */ static inline void srcu_lock_release(struct lockdep_map *map) { lock_map_release(map); } /* Annotates a synchronize_srcu() */ static inline void srcu_lock_sync(struct lockdep_map *map) { lock_map_sync(map); } #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) { return 1; } #define srcu_lock_acquire(m) do { } while (0) #define srcu_lock_release(m) do { } while (0) #define srcu_lock_sync(m) do { } while (0) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ /** * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * @c: condition to check for update-side use * * If PROVE_RCU is enabled, invoking this outside of an RCU read-side * critical section will result in an RCU-lockdep splat, unless @c evaluates * to 1. The @c argument will normally be a logical expression containing * lockdep_is_held() calls. */ #define srcu_dereference_check(p, ssp, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || srcu_read_lock_held(ssp), __rcu) /** * srcu_dereference - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU * is enabled, invoking this outside of an RCU read-side critical * section will result in an RCU-lockdep splat. */ #define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0) /** * srcu_dereference_notrace - no tracing and no lockdep calls from here * @p: the pointer to fetch and protect for later dereferencing * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. */ #define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1) /** * srcu_read_lock - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section. Note that SRCU read-side * critical sections may be nested. However, it is illegal to * call anything that waits on an SRCU grace period for the same * srcu_struct, whether directly or indirectly. Please note that * one way to indirectly wait on an SRCU grace period is to acquire * a mutex that is held elsewhere while calling synchronize_srcu() or * synchronize_srcu_expedited(). * * The return value from srcu_read_lock() is guaranteed to be * non-negative. This value must be passed unaltered to the matching * srcu_read_unlock(). Note that srcu_read_lock() and the matching * srcu_read_unlock() must occur in the same context, for example, it is * illegal to invoke srcu_read_unlock() in an irq handler if the matching * srcu_read_lock() was invoked in process context. Or, for that matter to * invoke srcu_read_unlock() from one task and the matching srcu_read_lock() * from another. 
*/ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) { int retval; srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); retval = __srcu_read_lock(ssp); srcu_lock_acquire(&ssp->dep_map); return retval; } /** * srcu_read_lock_fast - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section, but for a light-weight * smp_mb()-free reader. See srcu_read_lock() for more information. * * If srcu_read_lock_fast() is ever used on an srcu_struct structure, * then none of the other flavors may be used, whether before, during, * or after. Note that grace-period auto-expediting is disabled for _fast * srcu_struct structures because auto-expedited grace periods invoke * synchronize_rcu_expedited(), IPIs and all. * * Note that srcu_read_lock_fast() can be invoked only from those contexts * where RCU is watching, that is, from contexts where it would be legal * to invoke rcu_read_lock(). Otherwise, lockdep will complain. */ static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp) { struct srcu_ctr __percpu *retval; srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_FAST); retval = __srcu_read_lock_fast(ssp); rcu_try_lock_acquire(&ssp->dep_map); return retval; } /** * srcu_down_read_fast - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter a semaphore-like SRCU read-side critical section, but for * a light-weight smp_mb()-free reader. See srcu_read_lock_fast() and * srcu_down_read() for more information. * * The same srcu_struct may be used concurrently by srcu_down_read_fast() * and srcu_read_lock_fast(). */ static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp) { WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi()); srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_FAST); return __srcu_read_lock_fast(ssp); } /** * srcu_read_lock_lite - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section, but for a light-weight * smp_mb()-free reader. See srcu_read_lock() for more information. * * If srcu_read_lock_lite() is ever used on an srcu_struct structure, * then none of the other flavors may be used, whether before, during, * or after. Note that grace-period auto-expediting is disabled for _lite * srcu_struct structures because auto-expedited grace periods invoke * synchronize_rcu_expedited(), IPIs and all. * * Note that srcu_read_lock_lite() can be invoked only from those contexts * where RCU is watching, that is, from contexts where it would be legal * to invoke rcu_read_lock(). Otherwise, lockdep will complain. */ static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp) { int retval; srcu_check_read_flavor_force(ssp, SRCU_READ_FLAVOR_LITE); retval = __srcu_read_lock_lite(ssp); rcu_try_lock_acquire(&ssp->dep_map); return retval; } /** * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section, but in an NMI-safe manner. * See srcu_read_lock() for more information. * * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure, * then none of the other flavors may be used, whether before, during, * or after. 
*/ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp) { int retval; srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); retval = __srcu_read_lock_nmisafe(ssp); rcu_try_lock_acquire(&ssp->dep_map); return retval; } /* Used by tracing, cannot be traced and cannot invoke lockdep. */ static inline notrace int srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) { int retval; srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); retval = __srcu_read_lock(ssp); return retval; } /** * srcu_down_read - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter a semaphore-like SRCU read-side critical section. Note that * SRCU read-side critical sections may be nested. However, it is * illegal to call anything that waits on an SRCU grace period for the * same srcu_struct, whether directly or indirectly. Please note that * one way to indirectly wait on an SRCU grace period is to acquire * a mutex that is held elsewhere while calling synchronize_srcu() or * synchronize_srcu_expedited(). But if you want lockdep to help you * keep this stuff straight, you should instead use srcu_read_lock(). * * The semaphore-like nature of srcu_down_read() means that the matching * srcu_up_read() can be invoked from some other context, for example, * from some other task or from an irq handler. However, neither * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler. * * Calls to srcu_down_read() may be nested, similar to the manner in * which calls to down_read() may be nested. The same srcu_struct may be * used concurrently by srcu_down_read() and srcu_read_lock(). */ static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp) { WARN_ON_ONCE(in_nmi()); srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); return __srcu_read_lock(ssp); } /** * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock(). * * Exit an SRCU read-side critical section. */ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); srcu_lock_release(&ssp->dep_map); __srcu_read_unlock(ssp, idx); } /** * srcu_read_unlock_fast - unregister a old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @scp: return value from corresponding srcu_read_lock_fast(). * * Exit a light-weight SRCU read-side critical section. */ static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp) { srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST); srcu_lock_release(&ssp->dep_map); __srcu_read_unlock_fast(ssp, scp); } /** * srcu_up_read_fast - unregister a old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @scp: return value from corresponding srcu_read_lock_fast(). * * Exit an SRCU read-side critical section, but not necessarily from * the same context as the maching srcu_down_read_fast(). */ static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp) { WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi()); srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST); __srcu_read_unlock_fast(ssp, scp); } /** * srcu_read_unlock_lite - unregister a old reader from an SRCU-protected structure. 
* @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock_lite(). * * Exit a light-weight SRCU read-side critical section. */ static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE); srcu_lock_release(&ssp->dep_map); __srcu_read_unlock_lite(ssp, idx); } /** * srcu_read_unlock_nmisafe - unregister a old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock_nmisafe(). * * Exit an SRCU read-side critical section, but in an NMI-safe manner. */ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); rcu_lock_release(&ssp->dep_map); __srcu_read_unlock_nmisafe(ssp, idx); } /* Used by tracing, cannot be traced and cannot call lockdep. */ static inline notrace void srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) { srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); __srcu_read_unlock(ssp, idx); } /** * srcu_up_read - unregister a old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock(). * * Exit an SRCU read-side critical section, but not necessarily from * the same context as the maching srcu_down_read(). */ static inline void srcu_up_read(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); WARN_ON_ONCE(in_nmi()); srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); __srcu_read_unlock(ssp, idx); } /** * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock * * Converts the preceding srcu_read_unlock into a two-way memory barrier. * * Call this after srcu_read_unlock, to guarantee that all memory operations * that occur after smp_mb__after_srcu_read_unlock will appear to happen after * the preceding srcu_read_unlock. */ static inline void smp_mb__after_srcu_read_unlock(void) { /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */ } /** * smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock * * Converts the preceding srcu_read_lock into a two-way memory barrier. * * Call this after srcu_read_lock, to guarantee that all memory operations * that occur after smp_mb__after_srcu_read_lock will appear to happen after * the preceding srcu_read_lock. */ static inline void smp_mb__after_srcu_read_lock(void) { /* __srcu_read_lock has smp_mb() internally so nothing to do here. */ } DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct, _T->idx = srcu_read_lock(_T->lock), srcu_read_unlock(_T->lock, _T->idx), int idx) #endif
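Putting the header's API together, a typical SRCU pattern pairs srcu_read_lock()/srcu_dereference()/srcu_read_unlock() on the reader side with rcu_assign_pointer() plus synchronize_srcu() on the updater side. A minimal sketch follows, assuming DEFINE_STATIC_SRCU() from the srcutiny/srcutree implementation headers included above; struct demo_cfg and both helper functions are made-up names for illustration.

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_cfg {
	int value;
};

static struct demo_cfg __rcu *cur_cfg;
DEFINE_STATIC_SRCU(demo_srcu);

/* Reader: may sleep inside the SRCU read-side critical section. */
static int demo_read(void)
{
	struct demo_cfg *cfg;
	int idx, val = -1;

	idx = srcu_read_lock(&demo_srcu);
	cfg = srcu_dereference(cur_cfg, &demo_srcu);
	if (cfg)
		val = cfg->value;
	srcu_read_unlock(&demo_srcu, idx);
	return val;
}

/* Updater: publish a new config, wait for readers, then free the old one. */
static int demo_update(int value)
{
	struct demo_cfg *newc, *oldc;

	newc = kzalloc(sizeof(*newc), GFP_KERNEL);
	if (!newc)
		return -ENOMEM;
	newc->value = value;

	/* Assumes the caller serializes updaters (hence condition "1"). */
	oldc = rcu_dereference_protected(cur_cfg, 1);
	rcu_assign_pointer(cur_cfg, newc);
	synchronize_srcu(&demo_srcu);	/* wait for pre-existing readers */
	kfree(oldc);
	return 0;
}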
906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 * * X86-64 port * Andi Kleen. * * CPU hotplug support - ashok.raj@intel.com */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/ptrace.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/prctl.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/ftrace.h> #include <linux/syscalls.h> #include <linux/iommu.h> #include <asm/processor.h> #include <asm/pkru.h> #include <asm/fpu/sched.h> #include <asm/mmu_context.h> #include <asm/prctl.h> #include <asm/desc.h> #include <asm/proto.h> #include <asm/ia32.h> #include <asm/debugreg.h> #include <asm/switch_to.h> #include <asm/xen/hypervisor.h> #include <asm/vdso.h> #include <asm/resctrl.h> #include <asm/unistd.h> #include <asm/fsgsbase.h> #include <asm/fred.h> #ifdef CONFIG_IA32_EMULATION /* Not included via unistd.h */ #include <asm/unistd_32_ia32.h> #endif #include "process.h" /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode, const char *log_lvl) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; unsigned long d0, d1, d2, d3, d6, d7; unsigned int fsindex, gsindex; unsigned int ds, es; show_iret_regs(regs, log_lvl); if (regs->orig_ax != -1) pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); else pr_cont("\n"); printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n", log_lvl, regs->ax, regs->bx, regs->cx); printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n", log_lvl, regs->dx, regs->si, regs->di); printk("%sRBP: %016lx R08: %016lx R09: %016lx\n", log_lvl, regs->bp, regs->r8, regs->r9); printk("%sR10: %016lx R11: %016lx R12: %016lx\n", log_lvl, regs->r10, regs->r11, regs->r12); printk("%sR13: %016lx R14: %016lx R15: %016lx\n", log_lvl, regs->r13, regs->r14, regs->r15); if (mode == SHOW_REGS_SHORT) return; if (mode == SHOW_REGS_USER) { rdmsrl(MSR_FS_BASE, fs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); printk("%sFS: %016lx GS: %016lx\n", log_lvl, fs, shadowgs); return; } asm("movl %%ds,%0" : "=r" (ds)); asm("movl %%es,%0" : "=r" (es)); asm("movl %%fs,%0" : "=r" (fsindex)); asm("movl %%gs,%0" : "=r" (gsindex)); rdmsrl(MSR_FS_BASE, fs); rdmsrl(MSR_GS_BASE, gs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); cr0 = read_cr0(); cr2 = read_cr2(); cr3 = __read_cr3(); cr4 = __read_cr4(); printk("%sFS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", log_lvl, fs, fsindex, gs, gsindex, shadowgs); printk("%sCS: %04x DS: %04x ES: %04x CR0: %016lx\n", log_lvl, regs->cs, ds, es, cr0); printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n", log_lvl, cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); get_debugreg(d3, 3); 
get_debugreg(d6, 6); get_debugreg(d7, 7); /* Only print out debug registers if they are in their non-default state. */ if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && (d6 == DR6_RESERVED) && (d7 == 0x400))) { printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n", log_lvl, d0, d1, d2); printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n", log_lvl, d3, d6, d7); } if (cr4 & X86_CR4_PKE) printk("%sPKRU: %08x\n", log_lvl, read_pkru()); } void release_thread(struct task_struct *dead_task) { WARN_ON(dead_task->mm); } enum which_selector { FS, GS }; /* * Out of line to be protected from kprobes and tracing. If this would be * traced or probed than any access to a per CPU variable happens with * the wrong GS. * * It is not used on Xen paravirt. When paravirt support is needed, it * needs to be renamed with native_ prefix. */ static noinstr unsigned long __rdgsbase_inactive(void) { unsigned long gsbase; lockdep_assert_irqs_disabled(); /* * SWAPGS is no longer needed thus NOT allowed with FRED because * FRED transitions ensure that an operating system can _always_ * operate with its own GS base address: * - For events that occur in ring 3, FRED event delivery swaps * the GS base address with the IA32_KERNEL_GS_BASE MSR. * - ERETU (the FRED transition that returns to ring 3) also swaps * the GS base address with the IA32_KERNEL_GS_BASE MSR. * * And the operating system can still setup the GS segment for a * user thread without the need of loading a user thread GS with: * - Using LKGS, available with FRED, to modify other attributes * of the GS segment without compromising its ability always to * operate with its own GS base address. * - Accessing the GS segment base address for a user thread as * before using RDMSR or WRMSR on the IA32_KERNEL_GS_BASE MSR. * * Note, LKGS loads the GS base address into the IA32_KERNEL_GS_BASE * MSR instead of the GS segment’s descriptor cache. As such, the * operating system never changes its runtime GS base address. */ if (!cpu_feature_enabled(X86_FEATURE_FRED) && !cpu_feature_enabled(X86_FEATURE_XENPV)) { native_swapgs(); gsbase = rdgsbase(); native_swapgs(); } else { instrumentation_begin(); rdmsrl(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } return gsbase; } /* * Out of line to be protected from kprobes and tracing. If this would be * traced or probed than any access to a per CPU variable happens with * the wrong GS. * * It is not used on Xen paravirt. When paravirt support is needed, it * needs to be renamed with native_ prefix. */ static noinstr void __wrgsbase_inactive(unsigned long gsbase) { lockdep_assert_irqs_disabled(); if (!cpu_feature_enabled(X86_FEATURE_FRED) && !cpu_feature_enabled(X86_FEATURE_XENPV)) { native_swapgs(); wrgsbase(gsbase); native_swapgs(); } else { instrumentation_begin(); wrmsrl(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } } /* * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are * not available. The goal is to be reasonably fast on non-FSGSBASE systems. * It's forcibly inlined because it'll generate better code and this function * is hot. */ static __always_inline void save_base_legacy(struct task_struct *prev_p, unsigned short selector, enum which_selector which) { if (likely(selector == 0)) { /* * On Intel (without X86_BUG_NULL_SEG), the segment base could * be the pre-existing saved base or it could be zero. On AMD * (with X86_BUG_NULL_SEG), the segment base could be almost * anything. 
* * This branch is very hot (it's hit twice on almost every * context switch between 64-bit programs), and avoiding * the RDMSR helps a lot, so we just assume that whatever * value is already saved is correct. This matches historical * Linux behavior, so it won't break existing applications. * * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we * report that the base is zero, it needs to actually be zero: * see the corresponding logic in load_seg_legacy. */ } else { /* * If the selector is 1, 2, or 3, then the base is zero on * !X86_BUG_NULL_SEG CPUs and could be anything on * X86_BUG_NULL_SEG CPUs. In the latter case, Linux * has never attempted to preserve the base across context * switches. * * If selector > 3, then it refers to a real segment, and * saving the base isn't necessary. */ if (which == FS) prev_p->thread.fsbase = 0; else prev_p->thread.gsbase = 0; } } static __always_inline void save_fsgs(struct task_struct *task) { savesegment(fs, task->thread.fsindex); savesegment(gs, task->thread.gsindex); if (static_cpu_has(X86_FEATURE_FSGSBASE)) { /* * If FSGSBASE is enabled, we can't make any useful guesses * about the base, and user code expects us to save the current * value. Fortunately, reading the base directly is efficient. */ task->thread.fsbase = rdfsbase(); task->thread.gsbase = __rdgsbase_inactive(); } else { save_base_legacy(task, task->thread.fsindex, FS); save_base_legacy(task, task->thread.gsindex, GS); } } /* * While a process is running,current->thread.fsbase and current->thread.gsbase * may not match the corresponding CPU registers (see save_base_legacy()). */ void current_save_fsgs(void) { unsigned long flags; /* Interrupts need to be off for FSGSBASE */ local_irq_save(flags); save_fsgs(current); local_irq_restore(flags); } #if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL_GPL(current_save_fsgs); #endif static __always_inline void loadseg(enum which_selector which, unsigned short sel) { if (which == FS) loadsegment(fs, sel); else load_gs_index(sel); } static __always_inline void load_seg_legacy(unsigned short prev_index, unsigned long prev_base, unsigned short next_index, unsigned long next_base, enum which_selector which) { if (likely(next_index <= 3)) { /* * The next task is using 64-bit TLS, is not using this * segment at all, or is having fun with arcane CPU features. */ if (next_base == 0) { /* * Nasty case: on AMD CPUs, we need to forcibly zero * the base. */ if (static_cpu_has_bug(X86_BUG_NULL_SEG)) { loadseg(which, __USER_DS); loadseg(which, next_index); } else { /* * We could try to exhaustively detect cases * under which we can skip the segment load, * but there's really only one case that matters * for performance: if both the previous and * next states are fully zeroed, we can skip * the load. * * (This assumes that prev_base == 0 has no * false positives. This is the case on * Intel-style CPUs.) */ if (likely(prev_index | next_index | prev_base)) loadseg(which, next_index); } } else { if (prev_index != next_index) loadseg(which, next_index); wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE, next_base); } } else { /* * The next task is using a real segment. Loading the selector * is sufficient. */ loadseg(which, next_index); } } /* * Store prev's PKRU value and load next's PKRU value if they differ. PKRU * is not XSTATE managed on context switch because that would require a * lookup in the task's FPU xsave buffer and require to keep that updated * in various places. 
*/ static __always_inline void x86_pkru_load(struct thread_struct *prev, struct thread_struct *next) { if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return; /* Stash the prev task's value: */ prev->pkru = rdpkru(); /* * PKRU writes are slightly expensive. Avoid them when not * strictly necessary: */ if (prev->pkru != next->pkru) wrpkru(next->pkru); } static __always_inline void x86_fsgsbase_load(struct thread_struct *prev, struct thread_struct *next) { if (static_cpu_has(X86_FEATURE_FSGSBASE)) { /* Update the FS and GS selectors if they could have changed. */ if (unlikely(prev->fsindex || next->fsindex)) loadseg(FS, next->fsindex); if (unlikely(prev->gsindex || next->gsindex)) loadseg(GS, next->gsindex); /* Update the bases. */ wrfsbase(next->fsbase); __wrgsbase_inactive(next->gsbase); } else { load_seg_legacy(prev->fsindex, prev->fsbase, next->fsindex, next->fsbase, FS); load_seg_legacy(prev->gsindex, prev->gsbase, next->gsindex, next->gsbase, GS); } } unsigned long x86_fsgsbase_read_task(struct task_struct *task, unsigned short selector) { unsigned short idx = selector >> 3; unsigned long base; if (likely((selector & SEGMENT_TI_MASK) == 0)) { if (unlikely(idx >= GDT_ENTRIES)) return 0; /* * There are no user segments in the GDT with nonzero bases * other than the TLS segments. */ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return 0; idx -= GDT_ENTRY_TLS_MIN; base = get_desc_base(&task->thread.tls_array[idx]); } else { #ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; /* * If performance here mattered, we could protect the LDT * with RCU. This is a slow path, though, so we can just * take the mutex. */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); mutex_unlock(&task->mm->context.lock); #else base = 0; #endif } return base; } unsigned long x86_gsbase_read_cpu_inactive(void) { unsigned long gsbase; if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); gsbase = __rdgsbase_inactive(); local_irq_restore(flags); } else { rdmsrl(MSR_KERNEL_GS_BASE, gsbase); } return gsbase; } void x86_gsbase_write_cpu_inactive(unsigned long gsbase) { if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); __wrgsbase_inactive(gsbase); local_irq_restore(flags); } else { wrmsrl(MSR_KERNEL_GS_BASE, gsbase); } } unsigned long x86_fsbase_read_task(struct task_struct *task) { unsigned long fsbase; if (task == current) fsbase = x86_fsbase_read_cpu(); else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.fsindex == 0)) fsbase = task->thread.fsbase; else fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex); return fsbase; } unsigned long x86_gsbase_read_task(struct task_struct *task) { unsigned long gsbase; if (task == current) gsbase = x86_gsbase_read_cpu_inactive(); else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.gsindex == 0)) gsbase = task->thread.gsbase; else gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex); return gsbase; } void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase) { WARN_ON_ONCE(task == current); task->thread.fsbase = fsbase; } void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) { WARN_ON_ONCE(task == current); task->thread.gsbase = gsbase; } static void start_thread_common(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp, u16 _cs, u16 _ss, u16 _ds) { WARN_ON_ONCE(regs != current_pt_regs()); if 
(static_cpu_has(X86_BUG_NULL_SEG)) { /* Loading zero below won't clear the base. */ loadsegment(fs, __USER_DS); load_gs_index(__USER_DS); } reset_thread_features(); loadsegment(fs, 0); loadsegment(es, _ds); loadsegment(ds, _ds); load_gs_index(0); regs->ip = new_ip; regs->sp = new_sp; regs->csx = _cs; regs->ssx = _ss; /* * Allow single-step trap and NMI when starting a new task, thus * once the new task enters user space, single-step trap and NMI * are both enabled immediately. * * Entering a new task is logically speaking a return from a * system call (exec, fork, clone, etc.). As such, if ptrace * enables single stepping a single step exception should be * allowed to trigger immediately upon entering user space. * This is not optional. * * NMI should *never* be disabled in user space. As such, this * is an optional, opportunistic way to catch errors. * * Paranoia: High-order 48 bits above the lowest 16 bit SS are * discarded by the legacy IRET instruction on all Intel, AMD, * and Cyrix/Centaur/VIA CPUs, thus can be set unconditionally, * even when FRED is not enabled. But we choose the safer side * to use these bits only when FRED is enabled. */ if (cpu_feature_enabled(X86_FEATURE_FRED)) { regs->fred_ss.swevent = true; regs->fred_ss.nmi = true; } regs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { start_thread_common(regs, new_ip, new_sp, __USER_CS, __USER_DS, 0); } EXPORT_SYMBOL_GPL(start_thread); #ifdef CONFIG_COMPAT void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32) { start_thread_common(regs, new_ip, new_sp, x32 ? __USER_CS : __USER32_CS, __USER_DS, __USER_DS); } #endif /* * switch_to(x,y) should switch tasks from x to y. * * This could still be optimized: * - fold all the options into a flag word and test it with a single test. * - could test fs/gs bitsliced * * Kprobes not supported here. Set the probe on schedule instead. * Function graph tracer not supported too. */ __no_kmsan_checks __visible __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(hardirq_stack_inuse)); if (!test_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD)) switch_fpu_prepare(prev_p, cpu); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). * * (e.g. xen_load_tls()) */ save_fsgs(prev_p); /* * Load TLS before restoring any segments so that segment loads * reference the correct GDT entries. */ load_TLS(next, cpu); /* * Leave lazy mode, flushing any hypercalls made here. This * must be done after loading TLS entries in the GDT but before * loading segments that might reference them. */ arch_end_context_switch(next_p); /* Switch DS and ES. * * Reading them only returns the selectors, but writing them (if * nonzero) loads the full descriptor from the GDT or LDT. The * LDT for next is loaded in switch_mm, and the GDT is loaded * above. * * We therefore need to write new values to the segment * registers on every context switch unless both the new and old * values are zero. * * Note that we don't need to do anything for CS and SS, as * those are saved and restored as part of pt_regs. 
*/ savesegment(es, prev->es); if (unlikely(next->es | prev->es)) loadsegment(es, next->es); savesegment(ds, prev->ds); if (unlikely(next->ds | prev->ds)) loadsegment(ds, next->ds); x86_fsgsbase_load(prev, next); x86_pkru_load(prev, next); /* * Switch the PDA and FPU contexts. */ raw_cpu_write(current_task, next_p); raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); switch_fpu_finish(next_p); /* Reload sp0. */ update_task_stack(next_p); switch_to_extra(prev_p, next_p); if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) { /* * AMD CPUs have a misfeature: SYSRET sets the SS selector but * does not update the cached descriptor. As a result, if we * do SYSRET while SS is NULL, we'll end up in user mode with * SS apparently equal to __USER_DS but actually unusable. * * The straightforward workaround would be to fix it up just * before SYSRET, but that would slow down the system call * fast paths. Instead, we ensure that SS is never NULL in * system call context. We do this by replacing NULL SS * selectors at every context switch. SYSCALL sets up a valid * SS, so the only way to get NULL is to re-enter the kernel * from CPL 3 through an interrupt. Since that can't happen * in the same task as a running syscall, we are guaranteed to * context switch between every interrupt vector entry and a * subsequent SYSRET. * * We read SS first because SS reads are much faster than * writes. Out of caution, we force SS to __KERNEL_DS even if * it previously had a different non-NULL value. */ unsigned short ss_sel; savesegment(ss, ss_sel); if (ss_sel != __KERNEL_DS) loadsegment(ss, __KERNEL_DS); } /* Load the Intel cache allocation PQR MSR. */ resctrl_sched_in(next_p); return prev_p; } void set_personality_64bit(void) { /* inherit personality from parent */ /* Make sure to be in 64bit mode */ clear_thread_flag(TIF_ADDR32); /* Pretend that this comes from a 64bit execve */ task_pt_regs(current)->orig_ax = __NR_execve; current_thread_info()->status &= ~TS_COMPAT; if (current->mm) __set_bit(MM_CONTEXT_HAS_VSYSCALL, &current->mm->context.flags); /* TBD: overwrites user setup. Should have two bits. But 64bit processes have always behaved this way, so it's not too bad. The main problem is just that 32bit children are affected again. */ current->personality &= ~READ_IMPLIES_EXEC; } static void __set_personality_x32(void) { #ifdef CONFIG_X86_X32_ABI if (current->mm) current->mm->context.flags = 0; current->personality &= ~READ_IMPLIES_EXEC; /* * in_32bit_syscall() uses the presence of the x32 syscall bit * flag to determine compat status. The x86 mmap() code relies on * the syscall bitness so set x32 syscall bit right here to make * in_32bit_syscall() work during exec(). * * Pretend to come from a x32 execve. */ task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; current_thread_info()->status &= ~TS_COMPAT; #endif } static void __set_personality_ia32(void) { #ifdef CONFIG_IA32_EMULATION if (current->mm) { /* * uprobes applied to this MM need to know this and * cannot use user_64bit_mode() at that time. 
*/ __set_bit(MM_CONTEXT_UPROBE_IA32, &current->mm->context.flags); } current->personality |= force_personality32; /* Prepare the first "return" to user space */ task_pt_regs(current)->orig_ax = __NR_ia32_execve; current_thread_info()->status |= TS_COMPAT; #endif } void set_personality_ia32(bool x32) { /* Make sure to be in 32bit mode */ set_thread_flag(TIF_ADDR32); if (x32) __set_personality_x32(); else __set_personality_ia32(); } EXPORT_SYMBOL_GPL(set_personality_ia32); #ifdef CONFIG_CHECKPOINT_RESTORE static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) { int ret; ret = map_vdso_once(image, addr); if (ret) return ret; return (long)image->size; } #endif #ifdef CONFIG_ADDRESS_MASKING #define LAM_U57_BITS 6 static void enable_lam_func(void *__mm) { struct mm_struct *mm = __mm; unsigned long lam; if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) { lam = mm_lam_cr3_mask(mm); write_cr3(__read_cr3() | lam); cpu_tlbstate_update_lam(lam, mm_untag_mask(mm)); } } static void mm_enable_lam(struct mm_struct *mm) { mm->context.lam_cr3_mask = X86_CR3_LAM_U57; mm->context.untag_mask = ~GENMASK(62, 57); /* * Even though the process must still be single-threaded at this * point, kernel threads may be using the mm. IPI those kernel * threads if they exist. */ on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true); set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); } static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) { if (!cpu_feature_enabled(X86_FEATURE_LAM)) return -ENODEV; /* PTRACE_ARCH_PRCTL */ if (current->mm != mm) return -EINVAL; if (mm_valid_pasid(mm) && !test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags)) return -EINVAL; if (mmap_write_lock_killable(mm)) return -EINTR; /* * MM_CONTEXT_LOCK_LAM is set on clone. Prevent LAM from * being enabled unless the process is single threaded: */ if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { mmap_write_unlock(mm); return -EBUSY; } if (!nr_bits || nr_bits > LAM_U57_BITS) { mmap_write_unlock(mm); return -EINVAL; } mm_enable_lam(mm); mmap_write_unlock(mm); return 0; } #endif long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) { int ret = 0; switch (option) { case ARCH_SET_GS: { if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; preempt_disable(); /* * ARCH_SET_GS has always overwritten the index * and the base. Zero is the most sensible value * to put in the index, and is the only value that * makes any sense if FSGSBASE is unavailable. */ if (task == current) { loadseg(GS, 0); x86_gsbase_write_cpu_inactive(arg2); /* * On non-FSGSBASE systems, save_base_legacy() expects * that we also fill in thread.gsbase. */ task->thread.gsbase = arg2; } else { task->thread.gsindex = 0; x86_gsbase_write_task(task, arg2); } preempt_enable(); break; } case ARCH_SET_FS: { /* * Not strictly needed for %fs, but do it for symmetry * with %gs */ if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; preempt_disable(); /* * Set the selector to 0 for the same reason * as %gs above. */ if (task == current) { loadseg(FS, 0); x86_fsbase_write_cpu(arg2); /* * On non-FSGSBASE systems, save_base_legacy() expects * that we also fill in thread.fsbase. 
*/ task->thread.fsbase = arg2; } else { task->thread.fsindex = 0; x86_fsbase_write_task(task, arg2); } preempt_enable(); break; } case ARCH_GET_FS: { unsigned long base = x86_fsbase_read_task(task); ret = put_user(base, (unsigned long __user *)arg2); break; } case ARCH_GET_GS: { unsigned long base = x86_gsbase_read_task(task); ret = put_user(base, (unsigned long __user *)arg2); break; } #ifdef CONFIG_CHECKPOINT_RESTORE # ifdef CONFIG_X86_X32_ABI case ARCH_MAP_VDSO_X32: return prctl_map_vdso(&vdso_image_x32, arg2); # endif # ifdef CONFIG_IA32_EMULATION case ARCH_MAP_VDSO_32: return prctl_map_vdso(&vdso_image_32, arg2); # endif case ARCH_MAP_VDSO_64: return prctl_map_vdso(&vdso_image_64, arg2); #endif #ifdef CONFIG_ADDRESS_MASKING case ARCH_GET_UNTAG_MASK: return put_user(task->mm->context.untag_mask, (unsigned long __user *)arg2); case ARCH_ENABLE_TAGGED_ADDR: return prctl_enable_tagged_addr(task->mm, arg2); case ARCH_FORCE_TAGGED_SVA: if (current != task) return -EINVAL; set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags); return 0; case ARCH_GET_MAX_TAG_BITS: if (!cpu_feature_enabled(X86_FEATURE_LAM)) return put_user(0, (unsigned long __user *)arg2); else return put_user(LAM_U57_BITS, (unsigned long __user *)arg2); #endif case ARCH_SHSTK_ENABLE: case ARCH_SHSTK_DISABLE: case ARCH_SHSTK_LOCK: case ARCH_SHSTK_UNLOCK: case ARCH_SHSTK_STATUS: return shstk_prctl(task, option, arg2); default: ret = -EINVAL; break; } return ret; }
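/*
 * A minimal user-space sketch of exercising the arch_prctl() options
 * handled by do_arch_prctl_64() above.  Illustrative only: it assumes an
 * x86-64 kernel with CONFIG_ADDRESS_MASKING and a uapi <asm/prctl.h>
 * that provides the ARCH_* constants used here; the xarch_prctl()
 * wrapper is local to this sketch, and error handling is trimmed.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/prctl.h>

static long xarch_prctl(int option, unsigned long arg2)
{
	return syscall(SYS_arch_prctl, option, arg2);
}

int main(void)
{
	unsigned long fsbase, untag_mask, max_bits;

	/* ARCH_GET_FS stores the current FS base through the user pointer. */
	if (xarch_prctl(ARCH_GET_FS, (unsigned long)&fsbase) == 0)
		printf("FS base: %#lx\n", fsbase);

	/* LAM: ask how many pointer tag bits the kernel supports (0 = none). */
	if (xarch_prctl(ARCH_GET_MAX_TAG_BITS, (unsigned long)&max_bits) == 0 &&
	    max_bits) {
		/*
		 * Must be done while the process is still single-threaded
		 * (see prctl_enable_tagged_addr() above); afterwards the
		 * tag bits of user pointers are ignored on dereference.
		 */
		if (xarch_prctl(ARCH_ENABLE_TAGGED_ADDR, max_bits) == 0 &&
		    xarch_prctl(ARCH_GET_UNTAG_MASK,
				(unsigned long)&untag_mask) == 0)
			printf("LAM on, untag mask: %#lx\n", untag_mask);
	}
	return 0;
}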
// SPDX-License-Identifier: GPL-2.0 /* Watch queue and general notification mechanism, built on pipes * * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) * * See Documentation/core-api/watch_queue.rst */ #define pr_fmt(fmt) "watchq: " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/printk.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/poll.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/file.h> #include <linux/security.h> #include <linux/cred.h> #include <linux/sched/signal.h> #include <linux/watch_queue.h> #include <linux/pipe_fs_i.h> MODULE_DESCRIPTION("Watch queue"); MODULE_AUTHOR("Red Hat, Inc."); #define WATCH_QUEUE_NOTE_SIZE 128 #define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE) /* * This must be called under the RCU read-lock, which makes * sure that the wqueue still exists. It can then take the lock, * and check that the wqueue hasn't been destroyed, which in * turn makes sure that the notification pipe still exists. */ static inline bool lock_wqueue(struct watch_queue *wqueue) { spin_lock_bh(&wqueue->lock); if (unlikely(!wqueue->pipe)) { spin_unlock_bh(&wqueue->lock); return false; } return true; } static inline void unlock_wqueue(struct watch_queue *wqueue) { spin_unlock_bh(&wqueue->lock); } static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct watch_queue *wqueue = (struct watch_queue *)buf->private; struct page *page; unsigned int bit; /* We need to work out which note within the page this refers to, but * the note might have been maximum size, so merely ANDing the offset * off doesn't work. OTOH, the note must've been more than zero size. */ bit = buf->offset + buf->len; if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0) bit -= WATCH_QUEUE_NOTE_SIZE; bit /= WATCH_QUEUE_NOTE_SIZE; page = buf->page; bit += page->private; set_bit(bit, wqueue->notes_bitmap); generic_pipe_buf_release(pipe, buf); } // No try_steal function => no stealing #define watch_queue_pipe_buf_try_steal NULL /* New data written to a pipe may be appended to a buffer with this type. */ static const struct pipe_buf_operations watch_queue_pipe_buf_ops = { .release = watch_queue_pipe_buf_release, .try_steal = watch_queue_pipe_buf_try_steal, .get = generic_pipe_buf_get, }; /* * Post a notification to a watch queue. * * Must be called with the RCU lock for reading, and the * watch_queue lock held, which guarantees that the pipe * hasn't been released. 
*/ static bool post_one_notification(struct watch_queue *wqueue, struct watch_notification *n) { void *p; struct pipe_inode_info *pipe = wqueue->pipe; struct pipe_buffer *buf; struct page *page; unsigned int head, tail, note, offset, len; bool done = false; spin_lock_irq(&pipe->rd_wait.lock); head = pipe->head; tail = pipe->tail; if (pipe_full(head, tail, pipe->ring_size)) goto lost; note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes); if (note >= wqueue->nr_notes) goto lost; page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE]; offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE; get_page(page); len = n->info & WATCH_INFO_LENGTH; p = kmap_atomic(page); memcpy(p + offset, n, len); kunmap_atomic(p); buf = pipe_buf(pipe, head); buf->page = page; buf->private = (unsigned long)wqueue; buf->ops = &watch_queue_pipe_buf_ops; buf->offset = offset; buf->len = len; buf->flags = PIPE_BUF_FLAG_WHOLE; smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */ if (!test_and_clear_bit(note, wqueue->notes_bitmap)) { spin_unlock_irq(&pipe->rd_wait.lock); BUG(); } wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); done = true; out: spin_unlock_irq(&pipe->rd_wait.lock); if (done) kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); return done; lost: buf = pipe_buf(pipe, head - 1); buf->flags |= PIPE_BUF_FLAG_LOSS; goto out; } /* * Apply filter rules to a notification. */ static bool filter_watch_notification(const struct watch_filter *wf, const struct watch_notification *n) { const struct watch_type_filter *wt; unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8; unsigned int st_index = n->subtype / st_bits; unsigned int st_bit = 1U << (n->subtype % st_bits); int i; if (!test_bit(n->type, wf->type_filter)) return false; for (i = 0; i < wf->nr_filters; i++) { wt = &wf->filters[i]; if (n->type == wt->type && (wt->subtype_filter[st_index] & st_bit) && (n->info & wt->info_mask) == wt->info_filter) return true; } return false; /* If there is a filter, the default is to reject. */ } /** * __post_watch_notification - Post an event notification * @wlist: The watch list to post the event to. * @n: The notification record to post. * @cred: The creds of the process that triggered the notification. * @id: The ID to match on the watch. * * Post a notification of an event into a set of watch queues and let the users * know. * * The size of the notification should be set in n->info & WATCH_INFO_LENGTH and * should be in units of sizeof(*n). */ void __post_watch_notification(struct watch_list *wlist, struct watch_notification *n, const struct cred *cred, u64 id) { const struct watch_filter *wf; struct watch_queue *wqueue; struct watch *watch; if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) { WARN_ON(1); return; } rcu_read_lock(); hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) { if (watch->id != id) continue; n->info &= ~WATCH_INFO_ID; n->info |= watch->info_id; wqueue = rcu_dereference(watch->queue); wf = rcu_dereference(wqueue->filter); if (wf && !filter_watch_notification(wf, n)) continue; if (security_post_notification(watch->cred, cred, n) < 0) continue; if (lock_wqueue(wqueue)) { post_one_notification(wqueue, n); unlock_wqueue(wqueue); } } rcu_read_unlock(); } EXPORT_SYMBOL(__post_watch_notification); /* * Allocate sufficient pages to preallocation for the requested number of * notifications. 
*/ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) { struct watch_queue *wqueue = pipe->watch_queue; struct page **pages; unsigned long *bitmap; unsigned long user_bufs; int ret, i, nr_pages; if (!wqueue) return -ENODEV; if (wqueue->notes) return -EBUSY; if (nr_notes < 1 || nr_notes > 512) /* TODO: choose a better hard limit */ return -EINVAL; nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1); nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE; user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages); if (nr_pages > pipe->max_usage && (too_many_pipe_buffers_hard(user_bufs) || too_many_pipe_buffers_soft(user_bufs)) && pipe_is_unprivileged_user()) { ret = -EPERM; goto error; } nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes)); if (ret < 0) goto error; /* * pipe_resize_ring() does not update nr_accounted for watch_queue * pipes, because the above vastly overprovisions. Set nr_accounted on * and max_usage this pipe to the number that was actually charged to * the user above via account_pipe_buffers. */ pipe->max_usage = nr_pages; pipe->nr_accounted = nr_pages; ret = -ENOMEM; pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) goto error; for (i = 0; i < nr_pages; i++) { pages[i] = alloc_page(GFP_KERNEL); if (!pages[i]) goto error_p; pages[i]->private = i * WATCH_QUEUE_NOTES_PER_PAGE; } bitmap = bitmap_alloc(nr_notes, GFP_KERNEL); if (!bitmap) goto error_p; bitmap_fill(bitmap, nr_notes); wqueue->notes = pages; wqueue->notes_bitmap = bitmap; wqueue->nr_pages = nr_pages; wqueue->nr_notes = nr_notes; return 0; error_p: while (--i >= 0) __free_page(pages[i]); kfree(pages); error: (void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted); return ret; } /* * Set the filter on a watch queue. */ long watch_queue_set_filter(struct pipe_inode_info *pipe, struct watch_notification_filter __user *_filter) { struct watch_notification_type_filter *tf; struct watch_notification_filter filter; struct watch_type_filter *q; struct watch_filter *wfilter; struct watch_queue *wqueue = pipe->watch_queue; int ret, nr_filter = 0, i; if (!wqueue) return -ENODEV; if (!_filter) { /* Remove the old filter */ wfilter = NULL; goto set; } /* Grab the user's filter specification */ if (copy_from_user(&filter, _filter, sizeof(filter)) != 0) return -EFAULT; if (filter.nr_filters == 0 || filter.nr_filters > 16 || filter.__reserved != 0) return -EINVAL; tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf)); if (IS_ERR(tf)) return PTR_ERR(tf); ret = -EINVAL; for (i = 0; i < filter.nr_filters; i++) { if ((tf[i].info_filter & ~tf[i].info_mask) || tf[i].info_mask & WATCH_INFO_LENGTH) goto err_filter; /* Ignore any unknown types */ if (tf[i].type >= WATCH_TYPE__NR) continue; nr_filter++; } /* Now we need to build the internal filter from only the relevant * user-specified filters. 
*/ ret = -ENOMEM; wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL); if (!wfilter) goto err_filter; wfilter->nr_filters = nr_filter; q = wfilter->filters; for (i = 0; i < filter.nr_filters; i++) { if (tf[i].type >= WATCH_TYPE__NR) continue; q->type = tf[i].type; q->info_filter = tf[i].info_filter; q->info_mask = tf[i].info_mask; q->subtype_filter[0] = tf[i].subtype_filter[0]; __set_bit(q->type, wfilter->type_filter); q++; } kfree(tf); set: pipe_lock(pipe); wfilter = rcu_replace_pointer(wqueue->filter, wfilter, lockdep_is_held(&pipe->mutex)); pipe_unlock(pipe); if (wfilter) kfree_rcu(wfilter, rcu); return 0; err_filter: kfree(tf); return ret; } static void __put_watch_queue(struct kref *kref) { struct watch_queue *wqueue = container_of(kref, struct watch_queue, usage); struct watch_filter *wfilter; int i; for (i = 0; i < wqueue->nr_pages; i++) __free_page(wqueue->notes[i]); kfree(wqueue->notes); bitmap_free(wqueue->notes_bitmap); wfilter = rcu_access_pointer(wqueue->filter); if (wfilter) kfree_rcu(wfilter, rcu); kfree_rcu(wqueue, rcu); } /** * put_watch_queue - Dispose of a ref on a watchqueue. * @wqueue: The watch queue to unref. */ void put_watch_queue(struct watch_queue *wqueue) { kref_put(&wqueue->usage, __put_watch_queue); } EXPORT_SYMBOL(put_watch_queue); static void free_watch(struct rcu_head *rcu) { struct watch *watch = container_of(rcu, struct watch, rcu); put_watch_queue(rcu_access_pointer(watch->queue)); atomic_dec(&watch->cred->user->nr_watches); put_cred(watch->cred); kfree(watch); } static void __put_watch(struct kref *kref) { struct watch *watch = container_of(kref, struct watch, usage); call_rcu(&watch->rcu, free_watch); } /* * Discard a watch. */ static void put_watch(struct watch *watch) { kref_put(&watch->usage, __put_watch); } /** * init_watch - Initialise a watch * @watch: The watch to initialise. * @wqueue: The queue to assign. * * Initialise a watch and set the watch queue. */ void init_watch(struct watch *watch, struct watch_queue *wqueue) { kref_init(&watch->usage); INIT_HLIST_NODE(&watch->list_node); INIT_HLIST_NODE(&watch->queue_node); rcu_assign_pointer(watch->queue, wqueue); } static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue) { const struct cred *cred; struct watch *w; hlist_for_each_entry(w, &wlist->watchers, list_node) { struct watch_queue *wq = rcu_access_pointer(w->queue); if (wqueue == wq && watch->id == w->id) return -EBUSY; } cred = current_cred(); if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) { atomic_dec(&cred->user->nr_watches); return -EAGAIN; } watch->cred = get_cred(cred); rcu_assign_pointer(watch->watch_list, wlist); kref_get(&wqueue->usage); kref_get(&watch->usage); hlist_add_head(&watch->queue_node, &wqueue->watches); hlist_add_head_rcu(&watch->list_node, &wlist->watchers); return 0; } /** * add_watch_to_object - Add a watch on an object to a watch list * @watch: The watch to add * @wlist: The watch list to add to * * @watch->queue must have been set to point to the queue to post notifications * to and the watch list of the object to be watched. @watch->cred must also * have been set to the appropriate credentials and a ref taken on them. * * The caller must pin the queue and the list both and must hold the list * locked against racing watch additions/removals. 
*/ int add_watch_to_object(struct watch *watch, struct watch_list *wlist) { struct watch_queue *wqueue; int ret = -ENOENT; rcu_read_lock(); wqueue = rcu_access_pointer(watch->queue); if (lock_wqueue(wqueue)) { spin_lock(&wlist->lock); ret = add_one_watch(watch, wlist, wqueue); spin_unlock(&wlist->lock); unlock_wqueue(wqueue); } rcu_read_unlock(); return ret; } EXPORT_SYMBOL(add_watch_to_object); /** * remove_watch_from_object - Remove a watch or all watches from an object. * @wlist: The watch list to remove from * @wq: The watch queue of interest (ignored if @all is true) * @id: The ID of the watch to remove (ignored if @all is true) * @all: True to remove all objects * * Remove a specific watch or all watches from an object. A notification is * sent to the watcher to tell them that this happened. */ int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq, u64 id, bool all) { struct watch_notification_removal n; struct watch_queue *wqueue; struct watch *watch; int ret = -EBADSLT; rcu_read_lock(); again: spin_lock(&wlist->lock); hlist_for_each_entry(watch, &wlist->watchers, list_node) { if (all || (watch->id == id && rcu_access_pointer(watch->queue) == wq)) goto found; } spin_unlock(&wlist->lock); goto out; found: ret = 0; hlist_del_init_rcu(&watch->list_node); rcu_assign_pointer(watch->watch_list, NULL); spin_unlock(&wlist->lock); /* We now own the reference on watch that used to belong to wlist. */ n.watch.type = WATCH_TYPE_META; n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION; n.watch.info = watch->info_id | watch_sizeof(n.watch); n.id = id; if (id != 0) n.watch.info = watch->info_id | watch_sizeof(n); wqueue = rcu_dereference(watch->queue); if (lock_wqueue(wqueue)) { post_one_notification(wqueue, &n.watch); if (!hlist_unhashed(&watch->queue_node)) { hlist_del_init_rcu(&watch->queue_node); put_watch(watch); } unlock_wqueue(wqueue); } if (wlist->release_watch) { void (*release_watch)(struct watch *); release_watch = wlist->release_watch; rcu_read_unlock(); (*release_watch)(watch); rcu_read_lock(); } put_watch(watch); if (all && !hlist_empty(&wlist->watchers)) goto again; out: rcu_read_unlock(); return ret; } EXPORT_SYMBOL(remove_watch_from_object); /* * Remove all the watches that are contributory to a queue. This has the * potential to race with removal of the watches by the destruction of the * objects being watched or with the distribution of notifications. */ void watch_queue_clear(struct watch_queue *wqueue) { struct watch_list *wlist; struct watch *watch; bool release; rcu_read_lock(); spin_lock_bh(&wqueue->lock); /* * This pipe can be freed by callers like free_pipe_info(). * Removing this reference also prevents new notifications. */ wqueue->pipe = NULL; while (!hlist_empty(&wqueue->watches)) { watch = hlist_entry(wqueue->watches.first, struct watch, queue_node); hlist_del_init_rcu(&watch->queue_node); /* We now own a ref on the watch. */ spin_unlock_bh(&wqueue->lock); /* We can't do the next bit under the queue lock as we need to * get the list lock - which would cause a deadlock if someone * was removing from the opposite direction at the same time or * posting a notification. */ wlist = rcu_dereference(watch->watch_list); if (wlist) { void (*release_watch)(struct watch *); spin_lock(&wlist->lock); release = !hlist_unhashed(&watch->list_node); if (release) { hlist_del_init_rcu(&watch->list_node); rcu_assign_pointer(watch->watch_list, NULL); /* We now own a second ref on the watch. 
*/ } release_watch = wlist->release_watch; spin_unlock(&wlist->lock); if (release) { if (release_watch) { rcu_read_unlock(); /* This might need to call dput(), so * we have to drop all the locks. */ (*release_watch)(watch); rcu_read_lock(); } put_watch(watch); } } put_watch(watch); spin_lock_bh(&wqueue->lock); } spin_unlock_bh(&wqueue->lock); rcu_read_unlock(); } /** * get_watch_queue - Get a watch queue from its file descriptor. * @fd: The fd to query. */ struct watch_queue *get_watch_queue(int fd) { struct pipe_inode_info *pipe; struct watch_queue *wqueue = ERR_PTR(-EINVAL); CLASS(fd, f)(fd); if (!fd_empty(f)) { pipe = get_pipe_info(fd_file(f), false); if (pipe && pipe->watch_queue) { wqueue = pipe->watch_queue; kref_get(&wqueue->usage); } } return wqueue; } EXPORT_SYMBOL(get_watch_queue); /* * Initialise a watch queue */ int watch_queue_init(struct pipe_inode_info *pipe) { struct watch_queue *wqueue; wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL); if (!wqueue) return -ENOMEM; wqueue->pipe = pipe; kref_init(&wqueue->usage); spin_lock_init(&wqueue->lock); INIT_HLIST_HEAD(&wqueue->watches); pipe->watch_queue = wqueue; return 0; }
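/*
 * A minimal user-space sketch of consuming notifications from a watch
 * queue pipe as implemented above.  It assumes <linux/watch_queue.h>
 * from a kernel built with CONFIG_WATCH_QUEUE; attaching an actual
 * notification source to the pipe (for example a key watch) is omitted,
 * and error handling is trimmed.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/watch_queue.h>

int main(void)
{
	unsigned char buf[4096], *p, *end;
	int pipefd[2];
	ssize_t n;

	/* The queue is an ordinary pipe opened in notification mode. */
	if (pipe2(pipefd, O_NOTIFICATION_PIPE) < 0)
		return 1;

	/* Preallocate 256 notes; see watch_queue_set_size() above. */
	if (ioctl(pipefd[0], IOC_WATCH_QUEUE_SET_SIZE, 256) < 0)
		return 1;

	/*
	 * A watch source would now be attached to pipefd[0] by whatever
	 * subsystem is being watched; that step is omitted here.
	 */
	while ((n = read(pipefd[0], buf, sizeof(buf))) > 0) {
		p = buf;
		end = buf + n;
		while (p < end) {
			struct watch_notification *wn = (void *)p;
			size_t len = (wn->info & WATCH_INFO_LENGTH) >>
				     WATCH_INFO_LENGTH__SHIFT;

			if (!len || p + len > end)
				break;
			printf("type=%u subtype=%u len=%zu\n",
			       (unsigned int)wn->type,
			       (unsigned int)wn->subtype, len);
			p += len;
		}
	}
	return 0;
}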
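/*
 * A companion sketch to the consumer above: installing a filter with
 * IOC_WATCH_QUEUE_SET_FILTER so that only one notification type is
 * delivered (see watch_queue_set_filter() above).  It assumes the uapi
 * struct watch_notification_filter / watch_notification_type_filter
 * layout from <linux/watch_queue.h>; WATCH_TYPE_KEY_NOTIFY is just an
 * example type, the helper name is made up, and the call belongs after
 * IOC_WATCH_QUEUE_SET_SIZE and before any watches are attached.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/watch_queue.h>

static int install_key_filter(int pipe_rd_fd)
{
	struct watch_notification_filter *wf;
	size_t sz = sizeof(*wf) + sizeof(struct watch_notification_type_filter);
	int ret;

	wf = calloc(1, sz);
	if (!wf)
		return -1;

	wf->nr_filters = 1;
	/* Accept every subtype of WATCH_TYPE_KEY_NOTIFY, nothing else. */
	wf->filters[0].type = WATCH_TYPE_KEY_NOTIFY;
	memset(wf->filters[0].subtype_filter, 0xff,
	       sizeof(wf->filters[0].subtype_filter));

	ret = ioctl(pipe_rd_fd, IOC_WATCH_QUEUE_SET_FILTER, wf);
	free(wf);
	return ret;
}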
/* * slcan.c - serial line CAN interface driver (using tty line discipline) * * This file is derived from linux/drivers/net/slip/slip.c and got * inspiration from linux/drivers/net/can/can327.c for the rework made * on the line discipline code. * * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk> * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org> * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net> * can327.c Author : Max Staudt <max-linux@enpas.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see http://www.gnu.org/licenses/gpl.html * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/skb.h> #include "slcan.h" MODULE_ALIAS_LDISC(N_SLCAN); MODULE_DESCRIPTION("serial line CAN interface"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>"); MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>"); /* maximum rx buffer len: extended CAN frame with timestamp */ #define SLCAN_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1) #define SLCAN_CMD_LEN 1 #define SLCAN_SFF_ID_LEN 3 #define SLCAN_EFF_ID_LEN 8 #define SLCAN_STATE_LEN 1 #define SLCAN_STATE_BE_RXCNT_LEN 3 #define SLCAN_STATE_BE_TXCNT_LEN 3 #define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \ SLCAN_STATE_BE_RXCNT_LEN + \ SLCAN_STATE_BE_TXCNT_LEN) struct slcan { struct can_priv can; /* Various fields. */ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ spinlock_t lock; struct work_struct tx_work; /* Flushes transmit buffer */ /* These are pointers to the malloc()ed frame buffers.
*/ unsigned char rbuff[SLCAN_MTU]; /* receiver buffer */ int rcount; /* received chars counter */ unsigned char xbuff[SLCAN_MTU]; /* transmitter buffer*/ unsigned char *xhead; /* pointer to next XMIT byte */ int xleft; /* bytes left in XMIT queue */ unsigned long flags; /* Flag values/ mode etc */ #define SLF_ERROR 0 /* Parity, etc. error */ #define SLF_XCMD 1 /* Command transmission */ unsigned long cmd_flags; /* Command flags */ #define CF_ERR_RST 0 /* Reset errors on open */ wait_queue_head_t xcmd_wait; /* Wait queue for commands */ /* transmission */ }; static const u32 slcan_bitrate_const[] = { 10000, 20000, 50000, 100000, 125000, 250000, 500000, 800000, 1000000 }; bool slcan_err_rst_on_open(struct net_device *ndev) { struct slcan *sl = netdev_priv(ndev); return !!test_bit(CF_ERR_RST, &sl->cmd_flags); } int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on) { struct slcan *sl = netdev_priv(ndev); if (netif_running(ndev)) return -EBUSY; if (on) set_bit(CF_ERR_RST, &sl->cmd_flags); else clear_bit(CF_ERR_RST, &sl->cmd_flags); return 0; } /************************************************************************* * SLCAN ENCAPSULATION FORMAT * *************************************************************************/ /* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended * frame format) a data length code (len) which can be from 0 to 8 * and up to <len> data bytes as payload. * Additionally a CAN frame may become a remote transmission frame if the * RTR-bit is set. This causes another ECU to send a CAN frame with the * given can_id. * * The SLCAN ASCII representation of these different frame types is: * <type> <id> <dlc> <data>* * * Extended frames (29 bit) are defined by capital characters in the type. * RTR frames are defined as 'r' types - normal frames have 't' type: * t => 11 bit data frame * r => 11 bit RTR frame * T => 29 bit data frame * R => 29 bit RTR frame * * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64). 
* The <dlc> is a one byte ASCII number ('0' - '8') * The <data> section has at much ASCII Hex bytes as defined by the <dlc> * * Examples: * * t1230 : can_id 0x123, len 0, no data * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33 * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55 * r1230 : can_id 0x123, len 0, no data, remote transmission request * */ /************************************************************************* * STANDARD SLCAN DECAPSULATION * *************************************************************************/ /* Send one completely decapsulated can_frame to the network layer */ static void slcan_bump_frame(struct slcan *sl) { struct sk_buff *skb; struct can_frame *cf; int i, tmp; u32 tmpid; char *cmd = sl->rbuff; skb = alloc_can_skb(sl->dev, &cf); if (unlikely(!skb)) { sl->dev->stats.rx_dropped++; return; } switch (*cmd) { case 'r': cf->can_id = CAN_RTR_FLAG; fallthrough; case 't': /* store dlc ASCII value and terminate SFF CAN ID string */ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN]; sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN] = 0; /* point to payload data behind the dlc */ cmd += SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + 1; break; case 'R': cf->can_id = CAN_RTR_FLAG; fallthrough; case 'T': cf->can_id |= CAN_EFF_FLAG; /* store dlc ASCII value and terminate EFF CAN ID string */ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN]; sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN] = 0; /* point to payload data behind the dlc */ cmd += SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN + 1; break; default: goto decode_failed; } if (kstrtou32(sl->rbuff + SLCAN_CMD_LEN, 16, &tmpid)) goto decode_failed; cf->can_id |= tmpid; /* get len from sanitized ASCII value */ if (cf->len >= '0' && cf->len < '9') cf->len -= '0'; else goto decode_failed; /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf->can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf->len; i++) { tmp = hex_to_bin(*cmd++); if (tmp < 0) goto decode_failed; cf->data[i] = (tmp << 4); tmp = hex_to_bin(*cmd++); if (tmp < 0) goto decode_failed; cf->data[i] |= tmp; } } sl->dev->stats.rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) sl->dev->stats.rx_bytes += cf->len; netif_rx(skb); return; decode_failed: sl->dev->stats.rx_errors++; dev_kfree_skb(skb); } /* A change state frame must contain state info and receive and transmit * error counters. * * Examples: * * sb256256 : state bus-off: rx counter 256, tx counter 256 * sa057033 : state active, rx counter 57, tx counter 33 */ static void slcan_bump_state(struct slcan *sl) { struct net_device *dev = sl->dev; struct sk_buff *skb; struct can_frame *cf; char *cmd = sl->rbuff; u32 rxerr, txerr; enum can_state state, rx_state, tx_state; switch (cmd[1]) { case 'a': state = CAN_STATE_ERROR_ACTIVE; break; case 'w': state = CAN_STATE_ERROR_WARNING; break; case 'p': state = CAN_STATE_ERROR_PASSIVE; break; case 'b': state = CAN_STATE_BUS_OFF; break; default: return; } if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN) return; cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; cmd[SLCAN_STATE_BE_TXCNT_LEN] = 0; if (kstrtou32(cmd, 10, &txerr)) return; *cmd = 0; cmd -= SLCAN_STATE_BE_RXCNT_LEN; if (kstrtou32(cmd, 10, &rxerr)) return; skb = alloc_can_err_skb(dev, &cf); tx_state = txerr >= rxerr ? state : 0; rx_state = txerr <= rxerr ? 
state : 0; can_change_state(dev, cf, tx_state, rx_state); if (state == CAN_STATE_BUS_OFF) { can_bus_off(dev); } else if (skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } if (skb) netif_rx(skb); } /* An error frame can contain more than one type of error. * * Examples: * * e1a : len 1, errors: ACK error * e3bcO: len 3, errors: Bit0 error, CRC error, Tx overrun error */ static void slcan_bump_err(struct slcan *sl) { struct net_device *dev = sl->dev; struct sk_buff *skb; struct can_frame *cf; char *cmd = sl->rbuff; bool rx_errors = false, tx_errors = false, rx_over_errors = false; int i, len; /* get len from sanitized ASCII value */ len = cmd[1]; if (len >= '0' && len < '9') len -= '0'; else return; if ((len + SLCAN_CMD_LEN + 1) > sl->rcount) return; skb = alloc_can_err_skb(dev, &cf); if (skb) cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cmd += SLCAN_CMD_LEN + 1; for (i = 0; i < len; i++, cmd++) { switch (*cmd) { case 'a': netdev_dbg(dev, "ACK error\n"); tx_errors = true; if (skb) { cf->can_id |= CAN_ERR_ACK; cf->data[3] = CAN_ERR_PROT_LOC_ACK; } break; case 'b': netdev_dbg(dev, "Bit0 error\n"); tx_errors = true; if (skb) cf->data[2] |= CAN_ERR_PROT_BIT0; break; case 'B': netdev_dbg(dev, "Bit1 error\n"); tx_errors = true; if (skb) cf->data[2] |= CAN_ERR_PROT_BIT1; break; case 'c': netdev_dbg(dev, "CRC error\n"); rx_errors = true; if (skb) { cf->data[2] |= CAN_ERR_PROT_BIT; cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } break; case 'f': netdev_dbg(dev, "Form Error\n"); rx_errors = true; if (skb) cf->data[2] |= CAN_ERR_PROT_FORM; break; case 'o': netdev_dbg(dev, "Rx overrun error\n"); rx_over_errors = true; rx_errors = true; if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; } break; case 'O': netdev_dbg(dev, "Tx overrun error\n"); tx_errors = true; if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_TX_OVERFLOW; } break; case 's': netdev_dbg(dev, "Stuff error\n"); rx_errors = true; if (skb) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: if (skb) dev_kfree_skb(skb); return; } } if (rx_errors) dev->stats.rx_errors++; if (rx_over_errors) dev->stats.rx_over_errors++; if (tx_errors) dev->stats.tx_errors++; if (skb) netif_rx(skb); } static void slcan_bump(struct slcan *sl) { switch (sl->rbuff[0]) { case 'r': fallthrough; case 't': fallthrough; case 'R': fallthrough; case 'T': return slcan_bump_frame(sl); case 'e': return slcan_bump_err(sl); case 's': return slcan_bump_state(sl); default: return; } } /* parse tty input stream */ static void slcan_unesc(struct slcan *sl, unsigned char s) { if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && sl->rcount > 4) slcan_bump(sl); sl->rcount = 0; } else { if (!test_bit(SLF_ERROR, &sl->flags)) { if (sl->rcount < SLCAN_MTU) { sl->rbuff[sl->rcount++] = s; return; } sl->dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } } /************************************************************************* * STANDARD SLCAN ENCAPSULATION * *************************************************************************/ /* Encapsulate one can_frame and stuff into a TTY queue. 
*/ static void slcan_encaps(struct slcan *sl, struct can_frame *cf) { int actual, i; unsigned char *pos; unsigned char *endpos; canid_t id = cf->can_id; pos = sl->xbuff; if (cf->can_id & CAN_RTR_FLAG) *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */ else *pos = 'T'; /* becomes 't' in standard frame format (SSF) */ /* determine number of chars for the CAN-identifier */ if (cf->can_id & CAN_EFF_FLAG) { id &= CAN_EFF_MASK; endpos = pos + SLCAN_EFF_ID_LEN; } else { *pos |= 0x20; /* convert R/T to lower case for SFF */ id &= CAN_SFF_MASK; endpos = pos + SLCAN_SFF_ID_LEN; } /* build 3 (SFF) or 8 (EFF) digit CAN identifier */ pos++; while (endpos >= pos) { *endpos-- = hex_asc_upper[id & 0xf]; id >>= 4; } pos += (cf->can_id & CAN_EFF_FLAG) ? SLCAN_EFF_ID_LEN : SLCAN_SFF_ID_LEN; *pos++ = cf->len + '0'; /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf->can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf->len; i++) pos = hex_byte_pack_upper(pos, cf->data[i]); sl->dev->stats.tx_bytes += cf->len; } *pos++ = '\r'; /* Order of next two lines is *very* important. * When we are sending a little amount of data, * the transfer may be completed inside the ops->write() * routine, because it's running with interrupts enabled. * In this case we *never* got WRITE_WAKEUP event, * if we did not request it before write operation. * 14 Oct 1994 Dmitry Gorodchanin. */ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); sl->xleft = (pos - sl->xbuff) - actual; sl->xhead = sl->xbuff + actual; } /* Write out any remaining transmit buffer. Scheduled when tty is writable */ static void slcan_transmit(struct work_struct *work) { struct slcan *sl = container_of(work, struct slcan, tx_work); int actual; spin_lock_bh(&sl->lock); /* First make sure we're connected. */ if (unlikely(!netif_running(sl->dev)) && likely(!test_bit(SLF_XCMD, &sl->flags))) { spin_unlock_bh(&sl->lock); return; } if (sl->xleft <= 0) { if (unlikely(test_bit(SLF_XCMD, &sl->flags))) { clear_bit(SLF_XCMD, &sl->flags); clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); spin_unlock_bh(&sl->lock); wake_up(&sl->xcmd_wait); return; } /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); spin_unlock_bh(&sl->lock); netif_wake_queue(sl->dev); return; } actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; spin_unlock_bh(&sl->lock); } /* Called by the driver when there's room for more data. * Schedule the transmit. */ static void slcan_write_wakeup(struct tty_struct *tty) { struct slcan *sl = tty->disc_data; schedule_work(&sl->tx_work); } /* Send a can_frame to a TTY queue. */ static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb, struct net_device *dev) { struct slcan *sl = netdev_priv(dev); if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; spin_lock(&sl->lock); if (!netif_running(dev)) { spin_unlock(&sl->lock); netdev_warn(dev, "xmit: iface is down\n"); goto out; } if (!sl->tty) { spin_unlock(&sl->lock); goto out; } netif_stop_queue(sl->dev); slcan_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */ spin_unlock(&sl->lock); skb_tx_timestamp(skb); out: kfree_skb(skb); return NETDEV_TX_OK; } /****************************************** * Routines looking at netdevice side. 
******************************************/ static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd) { int ret, actual, n; spin_lock(&sl->lock); if (!sl->tty) { spin_unlock(&sl->lock); return -ENODEV; } n = scnprintf(sl->xbuff, sizeof(sl->xbuff), "%s", cmd); set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); actual = sl->tty->ops->write(sl->tty, sl->xbuff, n); sl->xleft = n - actual; sl->xhead = sl->xbuff + actual; set_bit(SLF_XCMD, &sl->flags); spin_unlock(&sl->lock); ret = wait_event_interruptible_timeout(sl->xcmd_wait, !test_bit(SLF_XCMD, &sl->flags), HZ); clear_bit(SLF_XCMD, &sl->flags); if (ret == -ERESTARTSYS) return ret; if (ret == 0) return -ETIMEDOUT; return 0; } /* Netdevice UP -> DOWN routine */ static int slcan_netdev_close(struct net_device *dev) { struct slcan *sl = netdev_priv(dev); int err; if (sl->can.bittiming.bitrate && sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) { err = slcan_transmit_cmd(sl, "C\r"); if (err) netdev_warn(dev, "failed to send close command 'C\\r'\n"); } /* TTY discipline is running. */ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); flush_work(&sl->tx_work); netif_stop_queue(dev); sl->rcount = 0; sl->xleft = 0; close_candev(dev); sl->can.state = CAN_STATE_STOPPED; if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN) sl->can.bittiming.bitrate = CAN_BITRATE_UNSET; return 0; } /* Netdevice DOWN -> UP routine */ static int slcan_netdev_open(struct net_device *dev) { struct slcan *sl = netdev_priv(dev); unsigned char cmd[SLCAN_MTU]; int err, s; /* The baud rate is not set with the command * `ip link set <iface> type can bitrate <baud>' and therefore * can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing * open_candev() to fail. So let's set to a fake value. */ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNSET) sl->can.bittiming.bitrate = CAN_BITRATE_UNKNOWN; err = open_candev(dev); if (err) { netdev_err(dev, "failed to open can device\n"); return err; } if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) { for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) { if (sl->can.bittiming.bitrate == slcan_bitrate_const[s]) break; } /* The CAN framework has already validate the bitrate value, * so we can avoid to check if `s' has been properly set. */ snprintf(cmd, sizeof(cmd), "C\rS%d\r", s); err = slcan_transmit_cmd(sl, cmd); if (err) { netdev_err(dev, "failed to send bitrate command 'C\\rS%d\\r'\n", s); goto cmd_transmit_failed; } if (test_bit(CF_ERR_RST, &sl->cmd_flags)) { err = slcan_transmit_cmd(sl, "F\r"); if (err) { netdev_err(dev, "failed to send error command 'F\\r'\n"); goto cmd_transmit_failed; } } if (sl->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { err = slcan_transmit_cmd(sl, "L\r"); if (err) { netdev_err(dev, "failed to send listen-only command 'L\\r'\n"); goto cmd_transmit_failed; } } else { err = slcan_transmit_cmd(sl, "O\r"); if (err) { netdev_err(dev, "failed to send open command 'O\\r'\n"); goto cmd_transmit_failed; } } } sl->can.state = CAN_STATE_ERROR_ACTIVE; netif_start_queue(dev); return 0; cmd_transmit_failed: close_candev(dev); return err; } static const struct net_device_ops slcan_netdev_ops = { .ndo_open = slcan_netdev_open, .ndo_stop = slcan_netdev_close, .ndo_start_xmit = slcan_netdev_xmit, .ndo_change_mtu = can_change_mtu, }; /****************************************** * Routines looking at TTY side. ******************************************/ /* Handle the 'receiver data ready' interrupt. 
* This function is called by the 'tty_io' module in the kernel when * a block of SLCAN data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. This will not * be re-entered while running but other ldisc functions may be called * in parallel */ static void slcan_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct slcan *sl = tty->disc_data; if (!netif_running(sl->dev)) return; /* Read the characters out of the buffer */ while (count--) { if (fp && *fp++) { if (!test_and_set_bit(SLF_ERROR, &sl->flags)) sl->dev->stats.rx_errors++; cp++; continue; } slcan_unesc(sl, *cp++); } } /* Open the high-level part of the SLCAN channel. * This function is called by the TTY module when the * SLCAN line discipline is called for. * * Called in process context serialized from other ldisc calls. */ static int slcan_open(struct tty_struct *tty) { struct net_device *dev; struct slcan *sl; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!tty->ops->write) return -EOPNOTSUPP; dev = alloc_candev(sizeof(*sl), 1); if (!dev) return -ENFILE; sl = netdev_priv(dev); /* Configure TTY interface */ tty->receive_room = 65536; /* We don't flow control */ sl->rcount = 0; sl->xleft = 0; spin_lock_init(&sl->lock); INIT_WORK(&sl->tx_work, slcan_transmit); init_waitqueue_head(&sl->xcmd_wait); /* Configure CAN metadata */ sl->can.bitrate_const = slcan_bitrate_const; sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const); sl->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY; /* Configure netdev interface */ sl->dev = dev; dev->netdev_ops = &slcan_netdev_ops; dev->ethtool_ops = &slcan_ethtool_ops; /* Mark ldisc channel as alive */ sl->tty = tty; tty->disc_data = sl; err = register_candev(dev); if (err) { free_candev(dev); pr_err("can't register candev\n"); return err; } netdev_info(dev, "slcan on %s.\n", tty->name); /* TTY layer expects 0 on success */ return 0; } /* Close down a SLCAN channel. * This means flushing out any pending queues, and then returning. This * call is serialized against other ldisc functions. * Once this is called, no other ldisc function of ours is entered. * * We also use this method for a hangup event. */ static void slcan_close(struct tty_struct *tty) { struct slcan *sl = tty->disc_data; unregister_candev(sl->dev); /* * The netdev needn't be UP (so .ndo_stop() is not called). Hence make * sure this is not running before freeing it up. */ flush_work(&sl->tx_work); /* Mark channel as dead */ spin_lock_bh(&sl->lock); tty->disc_data = NULL; sl->tty = NULL; spin_unlock_bh(&sl->lock); netdev_info(sl->dev, "slcan off %s.\n", tty->name); free_candev(sl->dev); } /* Perform I/O control on an active SLCAN channel. 
*/ static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct slcan *sl = tty->disc_data; unsigned int tmp; switch (cmd) { case SIOCGIFNAME: tmp = strlen(sl->dev->name) + 1; if (copy_to_user((void __user *)arg, sl->dev->name, tmp)) return -EFAULT; return 0; case SIOCSIFHWADDR: return -EINVAL; default: return tty_mode_ioctl(tty, cmd, arg); } } static struct tty_ldisc_ops slcan_ldisc = { .owner = THIS_MODULE, .num = N_SLCAN, .name = KBUILD_MODNAME, .open = slcan_open, .close = slcan_close, .ioctl = slcan_ioctl, .receive_buf = slcan_receive_buf, .write_wakeup = slcan_write_wakeup, }; static int __init slcan_init(void) { int status; pr_info("serial line CAN interface driver\n"); /* Fill in our line protocol discipline, and register it */ status = tty_register_ldisc(&slcan_ldisc); if (status) pr_err("can't register line discipline\n"); return status; } static void __exit slcan_exit(void) { /* This will only be called when all channels have been closed by * userspace - tty_ldisc.c takes care of the module's refcount. */ tty_unregister_ldisc(&slcan_ldisc); } module_init(slcan_init); module_exit(slcan_exit);
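/*
 * A small user-space sketch of the SLCAN encapsulation described above:
 * it renders a struct can_frame into the "<type><id><dlc><data...>\r"
 * ASCII form produced by slcan_encaps().  It assumes a recent
 * <linux/can.h> (struct can_frame with the 'len' member) and is
 * illustrative only; the helper name is made up.
 */
#include <stdio.h>
#include <linux/can.h>

static void slcan_render(const struct can_frame *cf, char *out, size_t outlen)
{
	canid_t id = cf->can_id;
	int eff = id & CAN_EFF_FLAG;
	int rtr = id & CAN_RTR_FLAG;
	int n, i;

	/* 'T'/'R' for 29-bit frames, 't'/'r' for 11-bit ones. */
	n = snprintf(out, outlen, "%c%0*X%u",
		     rtr ? (eff ? 'R' : 'r') : (eff ? 'T' : 't'),
		     eff ? 8 : 3,
		     (unsigned int)(id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK)),
		     (unsigned int)cf->len);

	/* RTR frames carry a dlc but never any data bytes. */
	if (!rtr)
		for (i = 0; i < cf->len && n < (int)outlen - 3; i++)
			n += snprintf(out + n, outlen - n, "%02X", cf->data[i]);

	snprintf(out + n, outlen - n, "\r");
}

int main(void)
{
	struct can_frame cf = {
		.can_id = 0x456,
		.len = 3,
		.data = { 0x11, 0x22, 0x33 },
	};
	char buf[32];

	slcan_render(&cf, buf, sizeof(buf));
	printf("%s\n", buf);	/* "t4563112233" plus the '\r' terminator */
	return 0;
}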
/* SPDX-License-Identifier: GPL-2.0 */ /* * Header for use in defining a given L4 protocol for connection tracking. * * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp> * - generalized L3 protocol dependent part. * * Derived from include/linux/netfilter_ipv4/ip_conntrack_protocol.h */ #ifndef _NF_CONNTRACK_L4PROTO_H #define _NF_CONNTRACK_L4PROTO_H #include <linux/netlink.h> #include <net/netlink.h> #include <net/netfilter/nf_conntrack.h> #include <net/netns/generic.h> struct seq_file; struct nf_conntrack_l4proto { /* L4 Protocol number. */ u_int8_t l4proto; /* Resolve clashes on insertion races. */ bool allow_clash; /* protoinfo nlattr size, closes a hole */ u16 nlattr_size; /* called by gc worker if table is full */ bool (*can_early_drop)(const struct nf_conn *ct); /* convert protoinfo to nfnetlink attributes */ int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct, bool destroy); /* convert nfnetlink attributes to protoinfo */ int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); int (*tuple_to_nlattr)(struct sk_buff *skb, const struct nf_conntrack_tuple *t); /* Calculate tuple nlattr size */ unsigned int (*nlattr_tuple_size)(void); int (*nlattr_to_tuple)(struct nlattr *tb[], struct nf_conntrack_tuple *t, u_int32_t flags); const struct nla_policy *nla_policy; struct { int (*nlattr_to_obj)(struct nlattr *tb[], struct net *net, void *data); int (*obj_to_nlattr)(struct sk_buff *skb, const void *data); u16 obj_size; u16 nlattr_max; const struct nla_policy *nla_policy; } ctnl_timeout; #ifdef CONFIG_NF_CONNTRACK_PROCFS /* Print out the private part of the conntrack.
*/ void (*print_conntrack)(struct seq_file *s, struct nf_conn *); #endif }; bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct net *net, struct nf_conntrack_tuple *tuple); bool icmpv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct net *net, struct nf_conntrack_tuple *tuple); bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig); bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig); int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state, u8 l4proto, union nf_inet_addr *outer_daddr); int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state); int nf_conntrack_icmpv6_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state); int nf_conntrack_icmp_packet(struct nf_conn *ct, struct sk_buff *skb, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state); int nf_conntrack_icmpv6_packet(struct nf_conn *ct, struct sk_buff *skb, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state); int nf_conntrack_udp_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state); int nf_conntrack_udplite_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state); int nf_conntrack_tcp_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state); int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state); int nf_conntrack_sctp_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state); int nf_conntrack_gre_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state); void nf_conntrack_generic_init_net(struct net *net); void nf_conntrack_tcp_init_net(struct net *net); void nf_conntrack_udp_init_net(struct net *net); void nf_conntrack_gre_init_net(struct net *net); void nf_conntrack_dccp_init_net(struct net *net); void nf_conntrack_sctp_init_net(struct net *net); void nf_conntrack_icmp_init_net(struct net *net); void nf_conntrack_icmpv6_init_net(struct net *net); /* Existing built-in generic protocol */ extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; #define MAX_NF_CT_PROTO IPPROTO_UDPLITE const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto); /* Generic netlink helpers */ int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple); int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t, u_int32_t flags); unsigned int nf_ct_port_nlattr_tuple_size(void); extern const struct nla_policy nf_ct_port_nla_policy[]; #ifdef CONFIG_SYSCTL __printf(4, 5) __cold void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, const struct nf_conn *ct, const struct nf_hook_state *state, const char *fmt, ...); __printf(4, 5) __cold void nf_l4proto_log_invalid(const struct sk_buff *skb, const struct nf_hook_state *state, u8 protonum, const char *fmt, ...); #else static inline __printf(4, 5) __cold void 
nf_l4proto_log_invalid(const struct sk_buff *skb, const struct nf_hook_state *state, u8 protonum, const char *fmt, ...) {} static inline __printf(4, 5) __cold void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, const struct nf_conn *ct, const struct nf_hook_state *state, const char *fmt, ...) { } #endif /* CONFIG_SYSCTL */ #if IS_ENABLED(CONFIG_NF_CONNTRACK) static inline struct nf_generic_net *nf_generic_pernet(struct net *net) { return &net->ct.nf_ct_proto.generic; } static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net) { return &net->ct.nf_ct_proto.tcp; } static inline struct nf_udp_net *nf_udp_pernet(struct net *net) { return &net->ct.nf_ct_proto.udp; } static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net) { return &net->ct.nf_ct_proto.icmp; } static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net) { return &net->ct.nf_ct_proto.icmpv6; } /* Caller must check nf_ct_protonum(ct) is IPPROTO_TCP before calling. */ static inline void nf_ct_set_tcp_be_liberal(struct nf_conn *ct) { ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; } /* Caller must check nf_ct_protonum(ct) is IPPROTO_TCP before calling. */ static inline bool nf_conntrack_tcp_established(const struct nf_conn *ct) { return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED && test_bit(IPS_ASSURED_BIT, &ct->status); } #endif #ifdef CONFIG_NF_CT_PROTO_DCCP static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net) { return &net->ct.nf_ct_proto.dccp; } #endif #ifdef CONFIG_NF_CT_PROTO_SCTP static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net) { return &net->ct.nf_ct_proto.sctp; } #endif #ifdef CONFIG_NF_CT_PROTO_GRE static inline struct nf_gre_net *nf_gre_pernet(struct net *net) { return &net->ct.nf_ct_proto.gre; } #endif #endif /*_NF_CONNTRACK_PROTOCOL_H*/
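A minimal sketch of how a caller might use the per-protocol handler table declared above. nf_ct_l4proto_find(), the can_early_drop hook and the generic fallback are taken from this header; the helper name, and nf_ct_protonum() from <net/netfilter/nf_conntrack.h>, are the surrounding assumptions.

#include <net/netfilter/nf_conntrack_l4proto.h>

/* Hypothetical helper: ask the registered L4 tracker whether this
 * connection is a candidate for early drop when the table is full.
 */
static bool example_ct_can_early_drop(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;

	/* Returns the handler for this protocol number, falling back to
	 * nf_conntrack_l4proto_generic for protocols without one.
	 */
	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));

	/* Only a few protocols (e.g. TCP) implement this hook; the gc
	 * worker consults it when the conntrack table is full.
	 */
	if (l4proto->can_early_drop)
		return l4proto->can_early_drop(ct);

	return false;
}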
1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 // SPDX-License-Identifier: GPL-2.0-only /* * common code for virtio vsock * * Copyright (C) 2013-2015 Red Hat, Inc. * Author: Asias He <asias@redhat.com> * Stefan Hajnoczi <stefanha@redhat.com> */ #include <linux/spinlock.h> #include <linux/module.h> #include <linux/sched/signal.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/virtio_vsock.h> #include <uapi/linux/vsockmon.h> #include <net/sock.h> #include <net/af_vsock.h> #define CREATE_TRACE_POINTS #include <trace/events/vsock_virtio_transport_common.h> /* How long to wait for graceful shutdown of a connection */ #define VSOCK_CLOSE_TIMEOUT (8 * HZ) /* Threshold for detecting small packets to copy */ #define GOOD_COPY_LEN 128 static void virtio_transport_cancel_close_work(struct vsock_sock *vsk, bool cancel_timeout); static const struct virtio_transport * virtio_transport_get_ops(struct vsock_sock *vsk) { const struct vsock_transport *t = vsock_core_get_transport(vsk); if (WARN_ON(!t)) return NULL; return container_of(t, struct virtio_transport, transport); } static bool virtio_transport_can_zcopy(const struct virtio_transport *t_ops, struct virtio_vsock_pkt_info *info, size_t pkt_len) { struct iov_iter *iov_iter; if (!info->msg) return false; iov_iter = &info->msg->msg_iter; if (iov_iter->iov_offset) return false; /* We can't send whole iov. */ if (iov_iter->count > pkt_len) return false; /* Check that transport can send data in zerocopy mode. */ t_ops = virtio_transport_get_ops(info->vsk); if (t_ops->can_msgzerocopy) { int pages_to_send = iov_iter_npages(iov_iter, MAX_SKB_FRAGS); /* +1 is for packet header. */ return t_ops->can_msgzerocopy(pages_to_send + 1); } return true; } static int virtio_transport_init_zcopy_skb(struct vsock_sock *vsk, struct sk_buff *skb, struct msghdr *msg, bool zerocopy) { struct ubuf_info *uarg; if (msg->msg_ubuf) { uarg = msg->msg_ubuf; net_zcopy_get(uarg); } else { struct iov_iter *iter = &msg->msg_iter; struct ubuf_info_msgzc *uarg_zc; uarg = msg_zerocopy_realloc(sk_vsock(vsk), iter->count, NULL); if (!uarg) return -1; uarg_zc = uarg_to_msgzc(uarg); uarg_zc->zerocopy = zerocopy ? 
1 : 0; } skb_zcopy_init(skb, uarg); return 0; } static int virtio_transport_fill_skb(struct sk_buff *skb, struct virtio_vsock_pkt_info *info, size_t len, bool zcopy) { if (zcopy) return __zerocopy_sg_from_iter(info->msg, NULL, skb, &info->msg->msg_iter, len); return memcpy_from_msg(skb_put(skb, len), info->msg, len); } static void virtio_transport_init_hdr(struct sk_buff *skb, struct virtio_vsock_pkt_info *info, size_t payload_len, u32 src_cid, u32 src_port, u32 dst_cid, u32 dst_port) { struct virtio_vsock_hdr *hdr; hdr = virtio_vsock_hdr(skb); hdr->type = cpu_to_le16(info->type); hdr->op = cpu_to_le16(info->op); hdr->src_cid = cpu_to_le64(src_cid); hdr->dst_cid = cpu_to_le64(dst_cid); hdr->src_port = cpu_to_le32(src_port); hdr->dst_port = cpu_to_le32(dst_port); hdr->flags = cpu_to_le32(info->flags); hdr->len = cpu_to_le32(payload_len); hdr->buf_alloc = cpu_to_le32(0); hdr->fwd_cnt = cpu_to_le32(0); } static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb, void *dst, size_t len) { struct iov_iter iov_iter = { 0 }; struct kvec kvec; size_t to_copy; kvec.iov_base = dst; kvec.iov_len = len; iov_iter.iter_type = ITER_KVEC; iov_iter.kvec = &kvec; iov_iter.nr_segs = 1; to_copy = min_t(size_t, len, skb->len); skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset, &iov_iter, to_copy); } /* Packet capture */ static struct sk_buff *virtio_transport_build_skb(void *opaque) { struct virtio_vsock_hdr *pkt_hdr; struct sk_buff *pkt = opaque; struct af_vsockmon_hdr *hdr; struct sk_buff *skb; size_t payload_len; /* A packet could be split to fit the RX buffer, so we can retrieve * the payload length from the header and the buffer pointer taking * care of the offset in the original packet. */ pkt_hdr = virtio_vsock_hdr(pkt); payload_len = pkt->len; skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len, GFP_ATOMIC); if (!skb) return NULL; hdr = skb_put(skb, sizeof(*hdr)); /* pkt->hdr is little-endian so no need to byteswap here */ hdr->src_cid = pkt_hdr->src_cid; hdr->src_port = pkt_hdr->src_port; hdr->dst_cid = pkt_hdr->dst_cid; hdr->dst_port = pkt_hdr->dst_port; hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO); hdr->len = cpu_to_le16(sizeof(*pkt_hdr)); memset(hdr->reserved, 0, sizeof(hdr->reserved)); switch (le16_to_cpu(pkt_hdr->op)) { case VIRTIO_VSOCK_OP_REQUEST: case VIRTIO_VSOCK_OP_RESPONSE: hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT); break; case VIRTIO_VSOCK_OP_RST: case VIRTIO_VSOCK_OP_SHUTDOWN: hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT); break; case VIRTIO_VSOCK_OP_RW: hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD); break; case VIRTIO_VSOCK_OP_CREDIT_UPDATE: case VIRTIO_VSOCK_OP_CREDIT_REQUEST: hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL); break; default: hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN); break; } skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr)); if (payload_len) { if (skb_is_nonlinear(pkt)) { void *data = skb_put(skb, payload_len); virtio_transport_copy_nonlinear_skb(pkt, data, payload_len); } else { skb_put_data(skb, pkt->data, payload_len); } } return skb; } void virtio_transport_deliver_tap_pkt(struct sk_buff *skb) { if (virtio_vsock_skb_tap_delivered(skb)) return; vsock_deliver_tap(virtio_transport_build_skb, skb); virtio_vsock_skb_set_tap_delivered(skb); } EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt); static u16 virtio_transport_get_type(struct sock *sk) { if (sk->sk_type == SOCK_STREAM) return VIRTIO_VSOCK_TYPE_STREAM; else return VIRTIO_VSOCK_TYPE_SEQPACKET; } /* Returns new sk_buff on success, otherwise returns NULL. 
*/ static struct sk_buff *virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info, size_t payload_len, bool zcopy, u32 src_cid, u32 src_port, u32 dst_cid, u32 dst_port) { struct vsock_sock *vsk; struct sk_buff *skb; size_t skb_len; skb_len = VIRTIO_VSOCK_SKB_HEADROOM; if (!zcopy) skb_len += payload_len; skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL); if (!skb) return NULL; virtio_transport_init_hdr(skb, info, payload_len, src_cid, src_port, dst_cid, dst_port); vsk = info->vsk; /* If 'vsk' != NULL then payload is always present, so we * will never call '__zerocopy_sg_from_iter()' below without * setting skb owner in 'skb_set_owner_w()'. The only case * when 'vsk' == NULL is VIRTIO_VSOCK_OP_RST control message * without payload. */ WARN_ON_ONCE(!(vsk && (info->msg && payload_len)) && zcopy); /* Set owner here, because '__zerocopy_sg_from_iter()' uses * owner of skb without check to update 'sk_wmem_alloc'. */ if (vsk) skb_set_owner_w(skb, sk_vsock(vsk)); if (info->msg && payload_len > 0) { int err; err = virtio_transport_fill_skb(skb, info, payload_len, zcopy); if (err) goto out; if (msg_data_left(info->msg) == 0 && info->type == VIRTIO_VSOCK_TYPE_SEQPACKET) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM); if (info->msg->msg_flags & MSG_EOR) hdr->flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR); } } if (info->reply) virtio_vsock_skb_set_reply(skb); trace_virtio_transport_alloc_pkt(src_cid, src_port, dst_cid, dst_port, payload_len, info->type, info->op, info->flags, zcopy); return skb; out: kfree_skb(skb); return NULL; } /* This function can only be used on connecting/connected sockets, * since a socket assigned to a transport is required. * * Do not use on listener sockets! */ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, struct virtio_vsock_pkt_info *info) { u32 max_skb_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE; u32 src_cid, src_port, dst_cid, dst_port; const struct virtio_transport *t_ops; struct virtio_vsock_sock *vvs; u32 pkt_len = info->pkt_len; bool can_zcopy = false; u32 rest_len; int ret; info->type = virtio_transport_get_type(sk_vsock(vsk)); t_ops = virtio_transport_get_ops(vsk); if (unlikely(!t_ops)) return -EFAULT; src_cid = t_ops->transport.get_local_cid(); src_port = vsk->local_addr.svm_port; if (!info->remote_cid) { dst_cid = vsk->remote_addr.svm_cid; dst_port = vsk->remote_addr.svm_port; } else { dst_cid = info->remote_cid; dst_port = info->remote_port; } vvs = vsk->trans; /* virtio_transport_get_credit might return less than pkt_len credit */ pkt_len = virtio_transport_get_credit(vvs, pkt_len); /* Do not send zero length OP_RW pkt */ if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW) return pkt_len; if (info->msg) { /* If zerocopy is not enabled by 'setsockopt()', we behave as * there is no MSG_ZEROCOPY flag set. */ if (!sock_flag(sk_vsock(vsk), SOCK_ZEROCOPY)) info->msg->msg_flags &= ~MSG_ZEROCOPY; if (info->msg->msg_flags & MSG_ZEROCOPY) can_zcopy = virtio_transport_can_zcopy(t_ops, info, pkt_len); if (can_zcopy) max_skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, (MAX_SKB_FRAGS * PAGE_SIZE)); } rest_len = pkt_len; do { struct sk_buff *skb; size_t skb_len; skb_len = min(max_skb_len, rest_len); skb = virtio_transport_alloc_skb(info, skb_len, can_zcopy, src_cid, src_port, dst_cid, dst_port); if (!skb) { ret = -ENOMEM; break; } /* We process buffer part by part, allocating skb on * each iteration. 
If this is last skb for this buffer * and MSG_ZEROCOPY mode is in use - we must allocate * completion for the current syscall. */ if (info->msg && info->msg->msg_flags & MSG_ZEROCOPY && skb_len == rest_len && info->op == VIRTIO_VSOCK_OP_RW) { if (virtio_transport_init_zcopy_skb(vsk, skb, info->msg, can_zcopy)) { kfree_skb(skb); ret = -ENOMEM; break; } } virtio_transport_inc_tx_pkt(vvs, skb); ret = t_ops->send_pkt(skb); if (ret < 0) break; /* Both virtio and vhost 'send_pkt()' returns 'skb_len', * but for reliability use 'ret' instead of 'skb_len'. * Also if partial send happens (e.g. 'ret' != 'skb_len') * somehow, we break this loop, but account such returned * value in 'virtio_transport_put_credit()'. */ rest_len -= ret; if (WARN_ONCE(ret != skb_len, "'send_pkt()' returns %i, but %zu expected\n", ret, skb_len)) break; } while (rest_len); virtio_transport_put_credit(vvs, rest_len); /* Return number of bytes, if any data has been sent. */ if (rest_len != pkt_len) ret = pkt_len - rest_len; return ret; } static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs, u32 len) { if (vvs->rx_bytes + len > vvs->buf_alloc) return false; vvs->rx_bytes += len; return true; } static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs, u32 len) { vvs->rx_bytes -= len; vvs->fwd_cnt += len; } void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); spin_lock_bh(&vvs->rx_lock); vvs->last_fwd_cnt = vvs->fwd_cnt; hdr->fwd_cnt = cpu_to_le32(vvs->fwd_cnt); hdr->buf_alloc = cpu_to_le32(vvs->buf_alloc); spin_unlock_bh(&vvs->rx_lock); } EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt); void virtio_transport_consume_skb_sent(struct sk_buff *skb, bool consume) { struct sock *s = skb->sk; if (s && skb->len) { struct vsock_sock *vs = vsock_sk(s); struct virtio_vsock_sock *vvs; vvs = vs->trans; spin_lock_bh(&vvs->tx_lock); vvs->bytes_unsent -= skb->len; spin_unlock_bh(&vvs->tx_lock); } if (consume) consume_skb(skb); } EXPORT_SYMBOL_GPL(virtio_transport_consume_skb_sent); u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit) { u32 ret; if (!credit) return 0; spin_lock_bh(&vvs->tx_lock); ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt); if (ret > credit) ret = credit; vvs->tx_cnt += ret; vvs->bytes_unsent += ret; spin_unlock_bh(&vvs->tx_lock); return ret; } EXPORT_SYMBOL_GPL(virtio_transport_get_credit); void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit) { if (!credit) return; spin_lock_bh(&vvs->tx_lock); vvs->tx_cnt -= credit; vvs->bytes_unsent -= credit; spin_unlock_bh(&vvs->tx_lock); } EXPORT_SYMBOL_GPL(virtio_transport_put_credit); static int virtio_transport_send_credit_update(struct vsock_sock *vsk) { struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE, .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); } static ssize_t virtio_transport_stream_do_peek(struct vsock_sock *vsk, struct msghdr *msg, size_t len) { struct virtio_vsock_sock *vvs = vsk->trans; struct sk_buff *skb; size_t total = 0; int err; spin_lock_bh(&vvs->rx_lock); skb_queue_walk(&vvs->rx_queue, skb) { size_t bytes; bytes = len - total; if (bytes > skb->len) bytes = skb->len; spin_unlock_bh(&vvs->rx_lock); /* sk_lock is held by caller so no one else can dequeue. * Unlock rx_lock since skb_copy_datagram_iter() may sleep. 
*/ err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset, &msg->msg_iter, bytes); if (err) goto out; total += bytes; spin_lock_bh(&vvs->rx_lock); if (total == len) break; } spin_unlock_bh(&vvs->rx_lock); return total; out: if (total) err = total; return err; } static ssize_t virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, struct msghdr *msg, size_t len) { struct virtio_vsock_sock *vvs = vsk->trans; size_t bytes, total = 0; struct sk_buff *skb; u32 fwd_cnt_delta; bool low_rx_bytes; int err = -EFAULT; u32 free_space; spin_lock_bh(&vvs->rx_lock); if (WARN_ONCE(skb_queue_empty(&vvs->rx_queue) && vvs->rx_bytes, "rx_queue is empty, but rx_bytes is non-zero\n")) { spin_unlock_bh(&vvs->rx_lock); return err; } while (total < len && !skb_queue_empty(&vvs->rx_queue)) { skb = skb_peek(&vvs->rx_queue); bytes = min_t(size_t, len - total, skb->len - VIRTIO_VSOCK_SKB_CB(skb)->offset); /* sk_lock is held by caller so no one else can dequeue. * Unlock rx_lock since skb_copy_datagram_iter() may sleep. */ spin_unlock_bh(&vvs->rx_lock); err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset, &msg->msg_iter, bytes); if (err) goto out; spin_lock_bh(&vvs->rx_lock); total += bytes; VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes; if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) { u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len); virtio_transport_dec_rx_pkt(vvs, pkt_len); __skb_unlink(skb, &vvs->rx_queue); consume_skb(skb); } } fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt; free_space = vvs->buf_alloc - fwd_cnt_delta; low_rx_bytes = (vvs->rx_bytes < sock_rcvlowat(sk_vsock(vsk), 0, INT_MAX)); spin_unlock_bh(&vvs->rx_lock); /* To reduce the number of credit update messages, * don't update credits as long as lots of space is available. * Note: the limit chosen here is arbitrary. Setting the limit * too high causes extra messages. Too low causes transmitter * stalls. As stalls are in theory more expensive than extra * messages, we set the limit to a high value. TODO: experiment * with different values. Also send credit update message when * number of bytes in rx queue is not enough to wake up reader. */ if (fwd_cnt_delta && (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || low_rx_bytes)) virtio_transport_send_credit_update(vsk); return total; out: if (total) err = total; return err; } static ssize_t virtio_transport_seqpacket_do_peek(struct vsock_sock *vsk, struct msghdr *msg) { struct virtio_vsock_sock *vvs = vsk->trans; struct sk_buff *skb; size_t total, len; spin_lock_bh(&vvs->rx_lock); if (!vvs->msg_count) { spin_unlock_bh(&vvs->rx_lock); return 0; } total = 0; len = msg_data_left(msg); skb_queue_walk(&vvs->rx_queue, skb) { struct virtio_vsock_hdr *hdr; if (total < len) { size_t bytes; int err; bytes = len - total; if (bytes > skb->len) bytes = skb->len; spin_unlock_bh(&vvs->rx_lock); /* sk_lock is held by caller so no one else can dequeue. * Unlock rx_lock since skb_copy_datagram_iter() may sleep. 
*/ err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset, &msg->msg_iter, bytes); if (err) return err; spin_lock_bh(&vvs->rx_lock); } total += skb->len; hdr = virtio_vsock_hdr(skb); if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) { if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) msg->msg_flags |= MSG_EOR; break; } } spin_unlock_bh(&vvs->rx_lock); return total; } static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk, struct msghdr *msg, int flags) { struct virtio_vsock_sock *vvs = vsk->trans; int dequeued_len = 0; size_t user_buf_len = msg_data_left(msg); bool msg_ready = false; struct sk_buff *skb; spin_lock_bh(&vvs->rx_lock); if (vvs->msg_count == 0) { spin_unlock_bh(&vvs->rx_lock); return 0; } while (!msg_ready) { struct virtio_vsock_hdr *hdr; size_t pkt_len; skb = __skb_dequeue(&vvs->rx_queue); if (!skb) break; hdr = virtio_vsock_hdr(skb); pkt_len = (size_t)le32_to_cpu(hdr->len); if (dequeued_len >= 0) { size_t bytes_to_copy; bytes_to_copy = min(user_buf_len, pkt_len); if (bytes_to_copy) { int err; /* sk_lock is held by caller so no one else can dequeue. * Unlock rx_lock since skb_copy_datagram_iter() may sleep. */ spin_unlock_bh(&vvs->rx_lock); err = skb_copy_datagram_iter(skb, 0, &msg->msg_iter, bytes_to_copy); if (err) { /* Copy of message failed. Rest of * fragments will be freed without copy. */ dequeued_len = err; } else { user_buf_len -= bytes_to_copy; } spin_lock_bh(&vvs->rx_lock); } if (dequeued_len >= 0) dequeued_len += pkt_len; } if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) { msg_ready = true; vvs->msg_count--; if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) msg->msg_flags |= MSG_EOR; } virtio_transport_dec_rx_pkt(vvs, pkt_len); kfree_skb(skb); } spin_unlock_bh(&vvs->rx_lock); virtio_transport_send_credit_update(vsk); return dequeued_len; } ssize_t virtio_transport_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags) { if (flags & MSG_PEEK) return virtio_transport_stream_do_peek(vsk, msg, len); else return virtio_transport_stream_do_dequeue(vsk, msg, len); } EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue); ssize_t virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk, struct msghdr *msg, int flags) { if (flags & MSG_PEEK) return virtio_transport_seqpacket_do_peek(vsk, msg); else return virtio_transport_seqpacket_do_dequeue(vsk, msg, flags); } EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue); int virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk, struct msghdr *msg, size_t len) { struct virtio_vsock_sock *vvs = vsk->trans; spin_lock_bh(&vvs->tx_lock); if (len > vvs->peer_buf_alloc) { spin_unlock_bh(&vvs->tx_lock); return -EMSGSIZE; } spin_unlock_bh(&vvs->tx_lock); return virtio_transport_stream_enqueue(vsk, msg, len); } EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_enqueue); int virtio_transport_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags) { return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue); s64 virtio_transport_stream_has_data(struct vsock_sock *vsk) { struct virtio_vsock_sock *vvs = vsk->trans; s64 bytes; spin_lock_bh(&vvs->rx_lock); bytes = vvs->rx_bytes; spin_unlock_bh(&vvs->rx_lock); return bytes; } EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data); u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk) { struct virtio_vsock_sock *vvs = vsk->trans; u32 msg_count; spin_lock_bh(&vvs->rx_lock); msg_count = vvs->msg_count; spin_unlock_bh(&vvs->rx_lock); return msg_count; } 
EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data); static s64 virtio_transport_has_space(struct vsock_sock *vsk) { struct virtio_vsock_sock *vvs = vsk->trans; s64 bytes; bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt); if (bytes < 0) bytes = 0; return bytes; } s64 virtio_transport_stream_has_space(struct vsock_sock *vsk) { struct virtio_vsock_sock *vvs = vsk->trans; s64 bytes; spin_lock_bh(&vvs->tx_lock); bytes = virtio_transport_has_space(vsk); spin_unlock_bh(&vvs->tx_lock); return bytes; } EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space); int virtio_transport_do_socket_init(struct vsock_sock *vsk, struct vsock_sock *psk) { struct virtio_vsock_sock *vvs; vvs = kzalloc(sizeof(*vvs), GFP_KERNEL); if (!vvs) return -ENOMEM; vsk->trans = vvs; vvs->vsk = vsk; if (psk && psk->trans) { struct virtio_vsock_sock *ptrans = psk->trans; vvs->peer_buf_alloc = ptrans->peer_buf_alloc; } if (vsk->buffer_size > VIRTIO_VSOCK_MAX_BUF_SIZE) vsk->buffer_size = VIRTIO_VSOCK_MAX_BUF_SIZE; vvs->buf_alloc = vsk->buffer_size; spin_lock_init(&vvs->rx_lock); spin_lock_init(&vvs->tx_lock); skb_queue_head_init(&vvs->rx_queue); return 0; } EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init); /* sk_lock held by the caller */ void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val) { struct virtio_vsock_sock *vvs = vsk->trans; if (*val > VIRTIO_VSOCK_MAX_BUF_SIZE) *val = VIRTIO_VSOCK_MAX_BUF_SIZE; vvs->buf_alloc = *val; virtio_transport_send_credit_update(vsk); } EXPORT_SYMBOL_GPL(virtio_transport_notify_buffer_size); int virtio_transport_notify_poll_in(struct vsock_sock *vsk, size_t target, bool *data_ready_now) { *data_ready_now = vsock_stream_has_data(vsk) >= target; return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in); int virtio_transport_notify_poll_out(struct vsock_sock *vsk, size_t target, bool *space_avail_now) { s64 free_space; free_space = vsock_stream_has_space(vsk); if (free_space > 0) *space_avail_now = true; else if (free_space == 0) *space_avail_now = false; return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out); int virtio_transport_notify_recv_init(struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data) { return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init); int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data) { return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block); int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data) { return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue); int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk, size_t target, ssize_t copied, bool data_read, struct vsock_transport_recv_notify_data *data) { return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue); int virtio_transport_notify_send_init(struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data) { return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init); int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data) { return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block); int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data) { return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue); int virtio_transport_notify_send_post_enqueue(struct 
vsock_sock *vsk, ssize_t written, struct vsock_transport_send_notify_data *data) { return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue); u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk) { return vsk->buffer_size; } EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat); bool virtio_transport_stream_is_active(struct vsock_sock *vsk) { return true; } EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active); bool virtio_transport_stream_allow(u32 cid, u32 port) { return true; } EXPORT_SYMBOL_GPL(virtio_transport_stream_allow); int virtio_transport_dgram_bind(struct vsock_sock *vsk, struct sockaddr_vm *addr) { return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind); bool virtio_transport_dgram_allow(u32 cid, u32 port) { return false; } EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow); int virtio_transport_connect(struct vsock_sock *vsk) { struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_REQUEST, .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); } EXPORT_SYMBOL_GPL(virtio_transport_connect); int virtio_transport_shutdown(struct vsock_sock *vsk, int mode) { struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_SHUTDOWN, .flags = (mode & RCV_SHUTDOWN ? VIRTIO_VSOCK_SHUTDOWN_RCV : 0) | (mode & SEND_SHUTDOWN ? VIRTIO_VSOCK_SHUTDOWN_SEND : 0), .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); } EXPORT_SYMBOL_GPL(virtio_transport_shutdown); int virtio_transport_dgram_enqueue(struct vsock_sock *vsk, struct sockaddr_vm *remote_addr, struct msghdr *msg, size_t dgram_len) { return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue); ssize_t virtio_transport_stream_enqueue(struct vsock_sock *vsk, struct msghdr *msg, size_t len) { struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_RW, .msg = msg, .pkt_len = len, .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); } EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue); void virtio_transport_destruct(struct vsock_sock *vsk) { struct virtio_vsock_sock *vvs = vsk->trans; virtio_transport_cancel_close_work(vsk, true); kfree(vvs); vsk->trans = NULL; } EXPORT_SYMBOL_GPL(virtio_transport_destruct); ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk) { struct virtio_vsock_sock *vvs = vsk->trans; size_t ret; spin_lock_bh(&vvs->tx_lock); ret = vvs->bytes_unsent; spin_unlock_bh(&vvs->tx_lock); return ret; } EXPORT_SYMBOL_GPL(virtio_transport_unsent_bytes); static int virtio_transport_reset(struct vsock_sock *vsk, struct sk_buff *skb) { struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_RST, .reply = !!skb, .vsk = vsk, }; /* Send RST only if the original pkt is not a RST pkt */ if (skb && le16_to_cpu(virtio_vsock_hdr(skb)->op) == VIRTIO_VSOCK_OP_RST) return 0; return virtio_transport_send_pkt_info(vsk, &info); } /* Normally packets are associated with a socket. There may be no socket if an * attempt was made to connect to a socket that does not exist. 
*/ static int virtio_transport_reset_no_sock(const struct virtio_transport *t, struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_RST, .type = le16_to_cpu(hdr->type), .reply = true, }; struct sk_buff *reply; /* Send RST only if the original pkt is not a RST pkt */ if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST) return 0; if (!t) return -ENOTCONN; reply = virtio_transport_alloc_skb(&info, 0, false, le64_to_cpu(hdr->dst_cid), le32_to_cpu(hdr->dst_port), le64_to_cpu(hdr->src_cid), le32_to_cpu(hdr->src_port)); if (!reply) return -ENOMEM; return t->send_pkt(reply); } /* This function should be called with sk_lock held and SOCK_DONE set */ static void virtio_transport_remove_sock(struct vsock_sock *vsk) { struct virtio_vsock_sock *vvs = vsk->trans; /* We don't need to take rx_lock, as the socket is closing and we are * removing it. */ __skb_queue_purge(&vvs->rx_queue); vsock_remove_sock(vsk); } static void virtio_transport_wait_close(struct sock *sk, long timeout) { if (timeout) { DEFINE_WAIT_FUNC(wait, woken_wake_function); add_wait_queue(sk_sleep(sk), &wait); do { if (sk_wait_event(sk, &timeout, sock_flag(sk, SOCK_DONE), &wait)) break; } while (!signal_pending(current) && timeout); remove_wait_queue(sk_sleep(sk), &wait); } } static void virtio_transport_cancel_close_work(struct vsock_sock *vsk, bool cancel_timeout) { struct sock *sk = sk_vsock(vsk); if (vsk->close_work_scheduled && (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) { vsk->close_work_scheduled = false; virtio_transport_remove_sock(vsk); /* Release refcnt obtained when we scheduled the timeout */ sock_put(sk); } } static void virtio_transport_do_close(struct vsock_sock *vsk, bool cancel_timeout) { struct sock *sk = sk_vsock(vsk); sock_set_flag(sk, SOCK_DONE); vsk->peer_shutdown = SHUTDOWN_MASK; if (vsock_stream_has_data(vsk) <= 0) sk->sk_state = TCP_CLOSING; sk->sk_state_change(sk); virtio_transport_cancel_close_work(vsk, cancel_timeout); } static void virtio_transport_close_timeout(struct work_struct *work) { struct vsock_sock *vsk = container_of(work, struct vsock_sock, close_work.work); struct sock *sk = sk_vsock(vsk); sock_hold(sk); lock_sock(sk); if (!sock_flag(sk, SOCK_DONE)) { (void)virtio_transport_reset(vsk, NULL); virtio_transport_do_close(vsk, false); } vsk->close_work_scheduled = false; release_sock(sk); sock_put(sk); } /* User context, vsk->sk is locked */ static bool virtio_transport_close(struct vsock_sock *vsk) { struct sock *sk = &vsk->sk; if (!(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_CLOSING)) return true; /* Already received SHUTDOWN from peer, reply with RST */ if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) { (void)virtio_transport_reset(vsk, NULL); return true; } if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK) (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK); if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING)) virtio_transport_wait_close(sk, sk->sk_lingertime); if (sock_flag(sk, SOCK_DONE)) { return true; } sock_hold(sk); INIT_DELAYED_WORK(&vsk->close_work, virtio_transport_close_timeout); vsk->close_work_scheduled = true; schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT); return false; } void virtio_transport_release(struct vsock_sock *vsk) { struct sock *sk = &vsk->sk; bool remove_sock = true; if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) remove_sock = virtio_transport_close(vsk); if (remove_sock) { sock_set_flag(sk, SOCK_DONE); 
virtio_transport_remove_sock(vsk); } } EXPORT_SYMBOL_GPL(virtio_transport_release); static int virtio_transport_recv_connecting(struct sock *sk, struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct vsock_sock *vsk = vsock_sk(sk); int skerr; int err; switch (le16_to_cpu(hdr->op)) { case VIRTIO_VSOCK_OP_RESPONSE: sk->sk_state = TCP_ESTABLISHED; sk->sk_socket->state = SS_CONNECTED; vsock_insert_connected(vsk); sk->sk_state_change(sk); break; case VIRTIO_VSOCK_OP_INVALID: break; case VIRTIO_VSOCK_OP_RST: skerr = ECONNRESET; err = 0; goto destroy; default: skerr = EPROTO; err = -EINVAL; goto destroy; } return 0; destroy: virtio_transport_reset(vsk, skb); sk->sk_state = TCP_CLOSE; sk->sk_err = skerr; sk_error_report(sk); return err; } static void virtio_transport_recv_enqueue(struct vsock_sock *vsk, struct sk_buff *skb) { struct virtio_vsock_sock *vvs = vsk->trans; bool can_enqueue, free_pkt = false; struct virtio_vsock_hdr *hdr; u32 len; hdr = virtio_vsock_hdr(skb); len = le32_to_cpu(hdr->len); spin_lock_bh(&vvs->rx_lock); can_enqueue = virtio_transport_inc_rx_pkt(vvs, len); if (!can_enqueue) { free_pkt = true; goto out; } if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) vvs->msg_count++; /* Try to copy small packets into the buffer of last packet queued, * to avoid wasting memory queueing the entire buffer with a small * payload. */ if (len <= GOOD_COPY_LEN && !skb_queue_empty(&vvs->rx_queue)) { struct virtio_vsock_hdr *last_hdr; struct sk_buff *last_skb; last_skb = skb_peek_tail(&vvs->rx_queue); last_hdr = virtio_vsock_hdr(last_skb); /* If there is space in the last packet queued, we copy the * new packet in its buffer. We avoid this if the last packet * queued has VIRTIO_VSOCK_SEQ_EOM set, because this is * delimiter of SEQPACKET message, so 'pkt' is the first packet * of a new message. */ if (skb->len < skb_tailroom(last_skb) && !(le32_to_cpu(last_hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)) { memcpy(skb_put(last_skb, skb->len), skb->data, skb->len); free_pkt = true; last_hdr->flags |= hdr->flags; le32_add_cpu(&last_hdr->len, len); goto out; } } __skb_queue_tail(&vvs->rx_queue, skb); out: spin_unlock_bh(&vvs->rx_lock); if (free_pkt) kfree_skb(skb); } static int virtio_transport_recv_connected(struct sock *sk, struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct vsock_sock *vsk = vsock_sk(sk); int err = 0; switch (le16_to_cpu(hdr->op)) { case VIRTIO_VSOCK_OP_RW: virtio_transport_recv_enqueue(vsk, skb); vsock_data_ready(sk); return err; case VIRTIO_VSOCK_OP_CREDIT_REQUEST: virtio_transport_send_credit_update(vsk); break; case VIRTIO_VSOCK_OP_CREDIT_UPDATE: sk->sk_write_space(sk); break; case VIRTIO_VSOCK_OP_SHUTDOWN: if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_RCV) vsk->peer_shutdown |= RCV_SHUTDOWN; if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND) vsk->peer_shutdown |= SEND_SHUTDOWN; if (vsk->peer_shutdown == SHUTDOWN_MASK) { if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) { (void)virtio_transport_reset(vsk, NULL); virtio_transport_do_close(vsk, true); } /* Remove this socket anyway because the remote peer sent * the shutdown. This way a new connection will succeed * if the remote peer uses the same source port, * even if the old socket is still unreleased, but now disconnected. 
*/ vsock_remove_sock(vsk); } if (le32_to_cpu(virtio_vsock_hdr(skb)->flags)) sk->sk_state_change(sk); break; case VIRTIO_VSOCK_OP_RST: virtio_transport_do_close(vsk, true); break; default: err = -EINVAL; break; } kfree_skb(skb); return err; } static void virtio_transport_recv_disconnecting(struct sock *sk, struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct vsock_sock *vsk = vsock_sk(sk); if (le16_to_cpu(hdr->op) == VIRTIO_VSOCK_OP_RST) virtio_transport_do_close(vsk, true); } static int virtio_transport_send_response(struct vsock_sock *vsk, struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_RESPONSE, .remote_cid = le64_to_cpu(hdr->src_cid), .remote_port = le32_to_cpu(hdr->src_port), .reply = true, .vsk = vsk, }; return virtio_transport_send_pkt_info(vsk, &info); } static bool virtio_transport_space_update(struct sock *sk, struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct vsock_sock *vsk = vsock_sk(sk); struct virtio_vsock_sock *vvs = vsk->trans; bool space_available; /* Listener sockets are not associated with any transport, so we are * not able to take the state to see if there is space available in the * remote peer, but since they are only used to receive requests, we * can assume that there is always space available in the other peer. */ if (!vvs) return true; /* buf_alloc and fwd_cnt is always included in the hdr */ spin_lock_bh(&vvs->tx_lock); vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc); vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt); space_available = virtio_transport_has_space(vsk); spin_unlock_bh(&vvs->tx_lock); return space_available; } /* Handle server socket */ static int virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb, struct virtio_transport *t) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct vsock_sock *vsk = vsock_sk(sk); struct vsock_sock *vchild; struct sock *child; int ret; if (le16_to_cpu(hdr->op) != VIRTIO_VSOCK_OP_REQUEST) { virtio_transport_reset_no_sock(t, skb); return -EINVAL; } if (sk_acceptq_is_full(sk)) { virtio_transport_reset_no_sock(t, skb); return -ENOMEM; } /* __vsock_release() might have already flushed accept_queue. * Subsequent enqueues would lead to a memory leak. */ if (sk->sk_shutdown == SHUTDOWN_MASK) { virtio_transport_reset_no_sock(t, skb); return -ESHUTDOWN; } child = vsock_create_connected(sk); if (!child) { virtio_transport_reset_no_sock(t, skb); return -ENOMEM; } sk_acceptq_added(sk); lock_sock_nested(child, SINGLE_DEPTH_NESTING); child->sk_state = TCP_ESTABLISHED; vchild = vsock_sk(child); vsock_addr_init(&vchild->local_addr, le64_to_cpu(hdr->dst_cid), le32_to_cpu(hdr->dst_port)); vsock_addr_init(&vchild->remote_addr, le64_to_cpu(hdr->src_cid), le32_to_cpu(hdr->src_port)); ret = vsock_assign_transport(vchild, vsk); /* Transport assigned (looking at remote_addr) must be the same * where we received the request. 
*/ if (ret || vchild->transport != &t->transport) { release_sock(child); virtio_transport_reset_no_sock(t, skb); sock_put(child); return ret; } if (virtio_transport_space_update(child, skb)) child->sk_write_space(child); vsock_insert_connected(vchild); vsock_enqueue_accept(sk, child); virtio_transport_send_response(vchild, skb); release_sock(child); sk->sk_data_ready(sk); return 0; } static bool virtio_transport_valid_type(u16 type) { return (type == VIRTIO_VSOCK_TYPE_STREAM) || (type == VIRTIO_VSOCK_TYPE_SEQPACKET); } /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex * lock. */ void virtio_transport_recv_pkt(struct virtio_transport *t, struct sk_buff *skb) { struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb); struct sockaddr_vm src, dst; struct vsock_sock *vsk; struct sock *sk; bool space_available; vsock_addr_init(&src, le64_to_cpu(hdr->src_cid), le32_to_cpu(hdr->src_port)); vsock_addr_init(&dst, le64_to_cpu(hdr->dst_cid), le32_to_cpu(hdr->dst_port)); trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port, dst.svm_cid, dst.svm_port, le32_to_cpu(hdr->len), le16_to_cpu(hdr->type), le16_to_cpu(hdr->op), le32_to_cpu(hdr->flags), le32_to_cpu(hdr->buf_alloc), le32_to_cpu(hdr->fwd_cnt)); if (!virtio_transport_valid_type(le16_to_cpu(hdr->type))) { (void)virtio_transport_reset_no_sock(t, skb); goto free_pkt; } /* The socket must be in connected or bound table * otherwise send reset back */ sk = vsock_find_connected_socket(&src, &dst); if (!sk) { sk = vsock_find_bound_socket(&dst); if (!sk) { (void)virtio_transport_reset_no_sock(t, skb); goto free_pkt; } } if (virtio_transport_get_type(sk) != le16_to_cpu(hdr->type)) { (void)virtio_transport_reset_no_sock(t, skb); sock_put(sk); goto free_pkt; } if (!skb_set_owner_sk_safe(skb, sk)) { WARN_ONCE(1, "receiving vsock socket has sk_refcnt == 0\n"); goto free_pkt; } vsk = vsock_sk(sk); lock_sock(sk); /* Check if sk has been closed or assigned to another transport before * lock_sock (note: listener sockets are not assigned to any transport) */ if (sock_flag(sk, SOCK_DONE) || (sk->sk_state != TCP_LISTEN && vsk->transport != &t->transport)) { (void)virtio_transport_reset_no_sock(t, skb); release_sock(sk); sock_put(sk); goto free_pkt; } space_available = virtio_transport_space_update(sk, skb); /* Update CID in case it has changed after a transport reset event */ if (vsk->local_addr.svm_cid != VMADDR_CID_ANY) vsk->local_addr.svm_cid = dst.svm_cid; if (space_available) sk->sk_write_space(sk); switch (sk->sk_state) { case TCP_LISTEN: virtio_transport_recv_listen(sk, skb, t); kfree_skb(skb); break; case TCP_SYN_SENT: virtio_transport_recv_connecting(sk, skb); kfree_skb(skb); break; case TCP_ESTABLISHED: virtio_transport_recv_connected(sk, skb); break; case TCP_CLOSING: virtio_transport_recv_disconnecting(sk, skb); kfree_skb(skb); break; default: (void)virtio_transport_reset_no_sock(t, skb); kfree_skb(skb); break; } release_sock(sk); /* Release refcnt obtained when we fetched this socket out of the * bound or connected list. */ sock_put(sk); return; free_pkt: kfree_skb(skb); } EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt); /* Remove skbs found in a queue that have a vsk that matches. * * Each skb is freed. * * Returns the count of skbs that were reply packets. 
*/ int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *queue) { struct sk_buff_head freeme; struct sk_buff *skb, *tmp; int cnt = 0; skb_queue_head_init(&freeme); spin_lock_bh(&queue->lock); skb_queue_walk_safe(queue, skb, tmp) { if (vsock_sk(skb->sk) != vsk) continue; __skb_unlink(skb, queue); __skb_queue_tail(&freeme, skb); if (virtio_vsock_skb_reply(skb)) cnt++; } spin_unlock_bh(&queue->lock); __skb_queue_purge(&freeme); return cnt; } EXPORT_SYMBOL_GPL(virtio_transport_purge_skbs); int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_actor) { struct virtio_vsock_sock *vvs = vsk->trans; struct sock *sk = sk_vsock(vsk); struct virtio_vsock_hdr *hdr; struct sk_buff *skb; int off = 0; int err; spin_lock_bh(&vvs->rx_lock); /* Use __skb_recv_datagram() for race-free handling of the receive. It * works for types other than dgrams. */ skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err); if (!skb) { spin_unlock_bh(&vvs->rx_lock); return err; } hdr = virtio_vsock_hdr(skb); if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) vvs->msg_count--; virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len)); spin_unlock_bh(&vvs->rx_lock); virtio_transport_send_credit_update(vsk); return recv_actor(sk, skb); } EXPORT_SYMBOL_GPL(virtio_transport_read_skb); int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val) { struct virtio_vsock_sock *vvs = vsk->trans; bool send_update; spin_lock_bh(&vvs->rx_lock); /* If number of available bytes is less than new SO_RCVLOWAT value, * kick sender to send more data, because sender may sleep in its * 'send()' syscall waiting for enough space at our side. Also * don't send credit update when peer already knows actual value - * such transmission will be useless. */ send_update = (vvs->rx_bytes < val) && (vvs->fwd_cnt != vvs->last_fwd_cnt); spin_unlock_bh(&vvs->rx_lock); if (send_update) { int err; err = virtio_transport_send_credit_update(vsk); if (err < 0) return err; } return 0; } EXPORT_SYMBOL_GPL(virtio_transport_notify_set_rcvlowat); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Asias He"); MODULE_DESCRIPTION("common code for virtio vsock");
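A standalone sketch, not part of the driver, of the credit arithmetic applied above by virtio_transport_get_credit() and virtio_transport_has_space(): the peer advertises buf_alloc and fwd_cnt in every packet header, and the sender may keep at most that many un-forwarded bytes in flight. Types are simplified to plain integers here.

#include <stdint.h>

/* tx_cnt: bytes this side has sent; peer_fwd_cnt: bytes the peer reports
 * having forwarded out of its receive buffer; peer_buf_alloc: the peer's
 * advertised receive buffer size. Unsigned wrap-around in the inner
 * subtraction is intentional, mirroring the u32 counters in the driver.
 */
static int64_t example_tx_space(uint32_t peer_buf_alloc, uint32_t tx_cnt,
				uint32_t peer_fwd_cnt)
{
	int64_t bytes = (int64_t)peer_buf_alloc -
			(uint32_t)(tx_cnt - peer_fwd_cnt);

	return bytes < 0 ? 0 : bytes;
}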
// SPDX-License-Identifier: GPL-2.0
#ifndef _LINUX_MM_SLOT_H
#define _LINUX_MM_SLOT_H

#include <linux/hashtable.h>
#include <linux/slab.h>

/*
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: link to the mm_slots hash list
 * @mm_node: link into the mm_slots list
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

#define mm_slot_entry(ptr, type, member) \
	container_of(ptr, type, member)

static inline void *mm_slot_alloc(struct kmem_cache *cache)
{
	if (!cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(cache, GFP_KERNEL);
}

static inline void mm_slot_free(struct kmem_cache *cache, void *objp)
{
	kmem_cache_free(cache, objp);
}

#define mm_slot_lookup(_hashtable, _mm) 				       \
({									       \
	struct mm_slot *tmp_slot, *mm_slot = NULL;			       \
									       \
	hash_for_each_possible(_hashtable, tmp_slot, hash, (unsigned long)_mm) \
		if (_mm == tmp_slot->mm) {				       \
			mm_slot = tmp_slot;				       \
			break;						       \
		}							       \
									       \
	mm_slot;							       \
})

#define mm_slot_insert(_hashtable, _mm, _mm_slot)			       \
({									       \
	_mm_slot->mm = _mm;						       \
	hash_add(_hashtable, &_mm_slot->hash, (unsigned long)_mm);	       \
})

#endif /* _LINUX_MM_SLOT_H */
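A hypothetical usage sketch of the helpers above, in the style of this header's users: a private per-mm structure embeds struct mm_slot so mm_slot_entry() can recover it after a lookup. example_slot, example_hash, example_cache, the cursor field, and the omitted locking and kmem_cache_create() setup are all assumptions, not kernel code.

struct example_slot {
	struct mm_slot slot;		/* embedded so mm_slot_entry() works */
	unsigned long scan_cursor;	/* made-up per-mm private state */
};

static DEFINE_HASHTABLE(example_hash, 10);
static struct kmem_cache *example_cache;	/* created elsewhere */

/* Callers are assumed to serialize access to example_hash externally. */
static struct example_slot *example_get_slot(struct mm_struct *mm)
{
	struct mm_slot *found = mm_slot_lookup(example_hash, mm);
	struct example_slot *es;

	if (found)
		return mm_slot_entry(found, struct example_slot, slot);

	es = mm_slot_alloc(example_cache);
	if (!es)
		return NULL;

	/* The insert macro dereferences its third argument directly,
	 * so pass a plain struct mm_slot pointer.
	 */
	found = &es->slot;
	mm_slot_insert(example_hash, mm, found);
	return es;
}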
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2017 Mellanox Technologies Ltd. All rights reserved.
 */

#include "rxe.h"
#include "rxe_hw_counters.h"

static const struct rdma_stat_desc rxe_counter_descs[] = {
	[RXE_CNT_SENT_PKTS].name		= "sent_pkts",
	[RXE_CNT_RCVD_PKTS].name		= "rcvd_pkts",
	[RXE_CNT_DUP_REQ].name			= "duplicate_request",
	[RXE_CNT_OUT_OF_SEQ_REQ].name		= "out_of_seq_request",
	[RXE_CNT_RCV_RNR].name			= "rcvd_rnr_err",
	[RXE_CNT_SND_RNR].name			= "send_rnr_err",
	[RXE_CNT_RCV_SEQ_ERR].name		= "rcvd_seq_err",
	[RXE_CNT_SENDER_SCHED].name		= "ack_deferred",
	[RXE_CNT_RETRY_EXCEEDED].name		= "retry_exceeded_err",
	[RXE_CNT_RNR_RETRY_EXCEEDED].name	= "retry_rnr_exceeded_err",
	[RXE_CNT_COMP_RETRY].name		= "completer_retry_err",
	[RXE_CNT_SEND_ERR].name			= "send_err",
	[RXE_CNT_LINK_DOWNED].name		= "link_downed",
	[RXE_CNT_RDMA_SEND].name		= "rdma_sends",
	[RXE_CNT_RDMA_RECV].name		= "rdma_recvs",
};

int rxe_ib_get_hw_stats(struct ib_device *ibdev,
			struct rdma_hw_stats *stats,
			u32 port, int index)
{
	struct rxe_dev *dev = to_rdev(ibdev);
	unsigned int cnt;

	if (!port || !stats)
		return -EINVAL;

	for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_descs); cnt++)
		stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]);

	return ARRAY_SIZE(rxe_counter_descs);
}

struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev,
						 u32 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_descs) != RXE_NUM_OF_COUNTERS);

	return rdma_alloc_hw_stats_struct(rxe_counter_descs,
					  ARRAY_SIZE(rxe_counter_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
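Illustrative sketch only: a hypothetical datapath site bumping one of the counters reported above. The stats_counters[] field and the RXE_CNT_* indices are taken from the code above; the function itself is made up.

/* Counters are plain atomic64s indexed by the same enum used in
 * rxe_counter_descs[]; rxe_ib_get_hw_stats() snapshots them on demand.
 */
static inline void example_count_sent_pkt(struct rxe_dev *rxe)
{
	atomic64_inc(&rxe->stats_counters[RXE_CNT_SENT_PKTS]);
}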
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
 * usbtouchscreen.c
 * Driver for USB Touchscreens, supporting those devices:
 *  - eGalax Touchkit
 *    includes eTurboTouch CT-410/510/700
 *  - 3M/Microtouch EX II series
 *  - ITM
 *  - PanJit TouchSet
 *  - eTurboTouch
 *  - Gunze AHL61
 *  - DMC TSC-10/25
 *  - IRTOUCHSYSTEMS/UNITOP
 *  - IdealTEK URTC1000
 *  - General Touch
 *  - GoTop Super_Q2/GogoPen/PenPower tablets
 *  - JASTEC USB touch controller/DigiTech DTR-02U
 *  - Zytronic capacitive touchscreen
 *  - NEXIO/iNexio
 *  - Elo TouchSystems 2700 IntelliTouch
 *  - EasyTouch USB Dual/Multi touch controller from Data Modul
 *
 * Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch>
 * Copyright (C) by Todd E. Johnson (mtouchusb.c)
 *
 * Driver is based on touchkitusb.c
 * - ITM parts are from itmtouch.c
 * - 3M parts are from mtouchusb.c
 * - PanJit parts are from an unmerged driver by Lanslott Gish
 * - DMC TSC 10/25 are from Holger Schurig, with ideas from an unmerged
 *   driver from Marius Vollmer
 *
 *****************************************************************************/

//#define DEBUG

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/hid.h>
#include <linux/mutex.h>

static bool swap_xy;
module_param(swap_xy, bool, 0644);
MODULE_PARM_DESC(swap_xy, "If set X and Y axes are swapped.");

static bool hwcalib_xy;
module_param(hwcalib_xy, bool, 0644);
MODULE_PARM_DESC(hwcalib_xy, "If set hw-calibrated X/Y are used if available");

/* device specific data/functions */
struct usbtouch_usb;
struct usbtouch_device_info {
	int min_xc, max_xc;
	int min_yc, max_yc;
	int min_press, max_press;
	int rept_size;

	/*
	 * Always service the USB devices irq not just when the input device is
	 * open. This is useful when devices have a watchdog which prevents us
	 * from periodically polling the device. Leave this unset unless your
	 * touchscreen device requires it, as it does consume more of the USB
	 * bandwidth.
	 */
	bool irq_always;

	/*
	 * used to get the packet len.
possible return values: * > 0: packet len * = 0: skip one byte * < 0: -return value more bytes needed */ int (*get_pkt_len) (unsigned char *pkt, int len); int (*read_data) (struct usbtouch_usb *usbtouch, unsigned char *pkt); int (*alloc) (struct usbtouch_usb *usbtouch); int (*init) (struct usbtouch_usb *usbtouch); void (*exit) (struct usbtouch_usb *usbtouch); }; /* a usbtouch device */ struct usbtouch_usb { unsigned char *data; dma_addr_t data_dma; int data_size; unsigned char *buffer; int buf_len; struct urb *irq; struct usb_interface *interface; struct input_dev *input; const struct usbtouch_device_info *type; struct mutex pm_mutex; /* serialize access to open/suspend */ bool is_open; char name[128]; char phys[64]; void *priv; int x, y; int touch, press; void (*process_pkt)(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len); }; /***************************************************************************** * e2i Part */ #ifdef CONFIG_TOUCHSCREEN_USB_E2I static int e2i_init(struct usbtouch_usb *usbtouch) { int ret; struct usb_device *udev = interface_to_usbdev(usbtouch->interface); ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01, 0x02, 0x0000, 0x0081, NULL, 0, USB_CTRL_SET_TIMEOUT); dev_dbg(&usbtouch->interface->dev, "%s - usb_control_msg - E2I_RESET - bytes|err: %d\n", __func__, ret); return ret; } static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { int tmp = (pkt[0] << 8) | pkt[1]; dev->x = (pkt[2] << 8) | pkt[3]; dev->y = (pkt[4] << 8) | pkt[5]; tmp = tmp - 0xA000; dev->touch = (tmp > 0); dev->press = (tmp > 0 ? tmp : 0); return 1; } static const struct usbtouch_device_info e2i_dev_info = { .min_xc = 0x0, .max_xc = 0x7fff, .min_yc = 0x0, .max_yc = 0x7fff, .rept_size = 6, .init = e2i_init, .read_data = e2i_read_data, }; #endif /***************************************************************************** * eGalax part */ #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX #ifndef MULTI_PACKET #define MULTI_PACKET #endif #define EGALAX_PKT_TYPE_MASK 0xFE #define EGALAX_PKT_TYPE_REPT 0x80 #define EGALAX_PKT_TYPE_DIAG 0x0A static int egalax_init(struct usbtouch_usb *usbtouch) { struct usb_device *udev = interface_to_usbdev(usbtouch->interface); int ret, i; /* * An eGalax diagnostic packet kicks the device into using the right * protocol. We send a "check active" packet. The response will be * read later and ignored. */ u8 *buf __free(kfree) = kmalloc(3, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = EGALAX_PKT_TYPE_DIAG; buf[1] = 1; /* length */ buf[2] = 'A'; /* command - check active */ for (i = 0; i < 3; i++) { ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, 3, USB_CTRL_SET_TIMEOUT); if (ret != -EPIPE) break; } return ret < 0 ? 
ret : 0; } static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if ((pkt[0] & EGALAX_PKT_TYPE_MASK) != EGALAX_PKT_TYPE_REPT) return 0; dev->x = ((pkt[3] & 0x0F) << 7) | (pkt[4] & 0x7F); dev->y = ((pkt[1] & 0x0F) << 7) | (pkt[2] & 0x7F); dev->touch = pkt[0] & 0x01; return 1; } static int egalax_get_pkt_len(unsigned char *buf, int len) { switch (buf[0] & EGALAX_PKT_TYPE_MASK) { case EGALAX_PKT_TYPE_REPT: return 5; case EGALAX_PKT_TYPE_DIAG: if (len < 2) return -1; return buf[1] + 2; } return 0; } static const struct usbtouch_device_info egalax_dev_info = { .min_xc = 0x0, .max_xc = 0x07ff, .min_yc = 0x0, .max_yc = 0x07ff, .rept_size = 16, .get_pkt_len = egalax_get_pkt_len, .read_data = egalax_read_data, .init = egalax_init, }; #endif /***************************************************************************** * EasyTouch part */ #ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH #ifndef MULTI_PACKET #define MULTI_PACKET #endif #define ETOUCH_PKT_TYPE_MASK 0xFE #define ETOUCH_PKT_TYPE_REPT 0x80 #define ETOUCH_PKT_TYPE_REPT2 0xB0 #define ETOUCH_PKT_TYPE_DIAG 0x0A static int etouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if ((pkt[0] & ETOUCH_PKT_TYPE_MASK) != ETOUCH_PKT_TYPE_REPT && (pkt[0] & ETOUCH_PKT_TYPE_MASK) != ETOUCH_PKT_TYPE_REPT2) return 0; dev->x = ((pkt[1] & 0x1F) << 7) | (pkt[2] & 0x7F); dev->y = ((pkt[3] & 0x1F) << 7) | (pkt[4] & 0x7F); dev->touch = pkt[0] & 0x01; return 1; } static int etouch_get_pkt_len(unsigned char *buf, int len) { switch (buf[0] & ETOUCH_PKT_TYPE_MASK) { case ETOUCH_PKT_TYPE_REPT: case ETOUCH_PKT_TYPE_REPT2: return 5; case ETOUCH_PKT_TYPE_DIAG: if (len < 2) return -1; return buf[1] + 2; } return 0; } static const struct usbtouch_device_info etouch_dev_info = { .min_xc = 0x0, .max_xc = 0x07ff, .min_yc = 0x0, .max_yc = 0x07ff, .rept_size = 16, .get_pkt_len = etouch_get_pkt_len, .read_data = etouch_read_data, }; #endif /***************************************************************************** * PanJit Part */ #ifdef CONFIG_TOUCHSCREEN_USB_PANJIT static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1]; dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3]; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info panjit_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 8, .read_data = panjit_read_data, }; #endif /***************************************************************************** * 3M/Microtouch Part */ #ifdef CONFIG_TOUCHSCREEN_USB_3M #define MTOUCHUSB_ASYNC_REPORT 1 #define MTOUCHUSB_RESET 7 #define MTOUCHUSB_REQ_CTRLLR_ID 10 #define MTOUCHUSB_REQ_CTRLLR_ID_LEN 16 static int mtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if (hwcalib_xy) { dev->x = (pkt[4] << 8) | pkt[3]; dev->y = 0xffff - ((pkt[6] << 8) | pkt[5]); } else { dev->x = (pkt[8] << 8) | pkt[7]; dev->y = (pkt[10] << 8) | pkt[9]; } dev->touch = (pkt[2] & 0x40) ? 
1 : 0; return 1; } struct mtouch_priv { u8 fw_rev_major; u8 fw_rev_minor; }; static int mtouch_get_fw_revision(struct usbtouch_usb *usbtouch) { struct usb_device *udev = interface_to_usbdev(usbtouch->interface); struct mtouch_priv *priv = usbtouch->priv; int ret; u8 *buf __free(kfree) = kzalloc(MTOUCHUSB_REQ_CTRLLR_ID_LEN, GFP_NOIO); if (!buf) return -ENOMEM; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), MTOUCHUSB_REQ_CTRLLR_ID, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, MTOUCHUSB_REQ_CTRLLR_ID_LEN, USB_CTRL_SET_TIMEOUT); if (ret != MTOUCHUSB_REQ_CTRLLR_ID_LEN) { dev_warn(&usbtouch->interface->dev, "Failed to read FW rev: %d\n", ret); return ret < 0 ? ret : -EIO; } priv->fw_rev_major = buf[3]; priv->fw_rev_minor = buf[4]; return 0; } static int mtouch_alloc(struct usbtouch_usb *usbtouch) { struct mtouch_priv *priv; priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; usbtouch->priv = priv; return 0; } static int mtouch_init(struct usbtouch_usb *usbtouch) { int ret, i; struct usb_device *udev = interface_to_usbdev(usbtouch->interface); ret = mtouch_get_fw_revision(usbtouch); if (ret) return ret; ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), MTOUCHUSB_RESET, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); dev_dbg(&usbtouch->interface->dev, "%s - usb_control_msg - MTOUCHUSB_RESET - bytes|err: %d\n", __func__, ret); if (ret < 0) return ret; msleep(150); for (i = 0; i < 3; i++) { ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), MTOUCHUSB_ASYNC_REPORT, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 1, 1, NULL, 0, USB_CTRL_SET_TIMEOUT); dev_dbg(&usbtouch->interface->dev, "%s - usb_control_msg - MTOUCHUSB_ASYNC_REPORT - bytes|err: %d\n", __func__, ret); if (ret >= 0) break; if (ret != -EPIPE) return ret; } /* Default min/max xy are the raw values, override if using hw-calib */ if (hwcalib_xy) { input_set_abs_params(usbtouch->input, ABS_X, 0, 0xffff, 0, 0); input_set_abs_params(usbtouch->input, ABS_Y, 0, 0xffff, 0, 0); } return 0; } static void mtouch_exit(struct usbtouch_usb *usbtouch) { struct mtouch_priv *priv = usbtouch->priv; kfree(priv); } static struct usbtouch_device_info mtouch_dev_info = { .min_xc = 0x0, .max_xc = 0x4000, .min_yc = 0x0, .max_yc = 0x4000, .rept_size = 11, .read_data = mtouch_read_data, .alloc = mtouch_alloc, .init = mtouch_init, .exit = mtouch_exit, }; static ssize_t mtouch_firmware_rev_show(struct device *dev, struct device_attribute *attr, char *output) { struct usb_interface *intf = to_usb_interface(dev); struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); struct mtouch_priv *priv = usbtouch->priv; return sysfs_emit(output, "%1x.%1x\n", priv->fw_rev_major, priv->fw_rev_minor); } static DEVICE_ATTR(firmware_rev, 0444, mtouch_firmware_rev_show, NULL); static struct attribute *mtouch_attrs[] = { &dev_attr_firmware_rev.attr, NULL }; static bool mtouch_group_visible(struct kobject *kobj) { struct device *dev = kobj_to_dev(kobj); struct usb_interface *intf = to_usb_interface(dev); struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); return usbtouch->type == &mtouch_dev_info; } DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(mtouch); static const struct attribute_group mtouch_attr_group = { .is_visible = SYSFS_GROUP_VISIBLE(mtouch), .attrs = mtouch_attrs, }; #endif /***************************************************************************** * ITM Part */ #ifdef CONFIG_TOUCHSCREEN_USB_ITM static int itm_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { int touch; /* * ITM 
devices report invalid x/y data if not touched. * if the screen was touched before but is not touched any more * report touch as 0 with the last valid x/y data once. then stop * reporting data until touched again. */ dev->press = ((pkt[2] & 0x01) << 7) | (pkt[5] & 0x7F); touch = ~pkt[7] & 0x20; if (!touch) { if (dev->touch) { dev->touch = 0; return 1; } return 0; } dev->x = ((pkt[0] & 0x1F) << 7) | (pkt[3] & 0x7F); dev->y = ((pkt[1] & 0x1F) << 7) | (pkt[4] & 0x7F); dev->touch = touch; return 1; } static const struct usbtouch_device_info itm_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .max_press = 0xff, .rept_size = 8, .read_data = itm_read_data, }; #endif /***************************************************************************** * eTurboTouch part */ #ifdef CONFIG_TOUCHSCREEN_USB_ETURBO #ifndef MULTI_PACKET #define MULTI_PACKET #endif static int eturbo_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { unsigned int shift; /* packets should start with sync */ if (!(pkt[0] & 0x80)) return 0; shift = (6 - (pkt[0] & 0x03)); dev->x = ((pkt[3] << 7) | pkt[4]) >> shift; dev->y = ((pkt[1] << 7) | pkt[2]) >> shift; dev->touch = (pkt[0] & 0x10) ? 1 : 0; return 1; } static int eturbo_get_pkt_len(unsigned char *buf, int len) { if (buf[0] & 0x80) return 5; if (buf[0] == 0x01) return 3; return 0; } static const struct usbtouch_device_info eturbo_dev_info = { .min_xc = 0x0, .max_xc = 0x07ff, .min_yc = 0x0, .max_yc = 0x07ff, .rept_size = 8, .get_pkt_len = eturbo_get_pkt_len, .read_data = eturbo_read_data, }; #endif /***************************************************************************** * Gunze part */ #ifdef CONFIG_TOUCHSCREEN_USB_GUNZE static int gunze_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if (!(pkt[0] & 0x80) || ((pkt[1] | pkt[2] | pkt[3]) & 0x80)) return 0; dev->x = ((pkt[0] & 0x1F) << 7) | (pkt[2] & 0x7F); dev->y = ((pkt[1] & 0x1F) << 7) | (pkt[3] & 0x7F); dev->touch = pkt[0] & 0x20; return 1; } static const struct usbtouch_device_info gunze_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 4, .read_data = gunze_read_data, }; #endif /***************************************************************************** * DMC TSC-10/25 Part * * Documentation about the controller and it's protocol can be found at * http://www.dmccoltd.com/files/controler/tsc10usb_pi_e.pdf * http://www.dmccoltd.com/files/controler/tsc25_usb_e.pdf */ #ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10 /* supported data rates. 
currently using 130 */ #define TSC10_RATE_POINT 0x50 #define TSC10_RATE_30 0x40 #define TSC10_RATE_50 0x41 #define TSC10_RATE_80 0x42 #define TSC10_RATE_100 0x43 #define TSC10_RATE_130 0x44 #define TSC10_RATE_150 0x45 /* commands */ #define TSC10_CMD_RESET 0x55 #define TSC10_CMD_RATE 0x05 #define TSC10_CMD_DATA1 0x01 static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) { struct usb_device *dev = interface_to_usbdev(usbtouch->interface); int ret; u8 *buf __free(kfree) = kmalloc(2, GFP_NOIO); if (!buf) return -ENOMEM; /* reset */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), TSC10_CMD_RESET, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, buf, 2, USB_CTRL_SET_TIMEOUT); if (ret < 0) return ret; if (buf[0] != 0x06) return -ENODEV; /* TSC-25 data sheet specifies a delay after the RESET command */ msleep(150); /* set coordinate output rate */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), TSC10_CMD_RATE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, TSC10_RATE_150, 0, buf, 2, USB_CTRL_SET_TIMEOUT); if (ret < 0) return ret; if (buf[0] != 0x06 && (buf[0] != 0x15 || buf[1] != 0x01)) return -ENODEV; /* start sending data */ return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), TSC10_CMD_DATA1, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int dmc_tsc10_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[2] & 0x03) << 8) | pkt[1]; dev->y = ((pkt[4] & 0x03) << 8) | pkt[3]; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info dmc_tsc10_dev_info = { .min_xc = 0x0, .max_xc = 0x03ff, .min_yc = 0x0, .max_yc = 0x03ff, .rept_size = 5, .init = dmc_tsc10_init, .read_data = dmc_tsc10_read_data, }; #endif /***************************************************************************** * IRTOUCH Part */ #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH static int irtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = (pkt[3] << 8) | pkt[2]; dev->y = (pkt[5] << 8) | pkt[4]; dev->touch = (pkt[1] & 0x03) ? 
1 : 0; return 1; } static const struct usbtouch_device_info irtouch_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 8, .read_data = irtouch_read_data, }; static const struct usbtouch_device_info irtouch_hires_dev_info = { .min_xc = 0x0, .max_xc = 0x7fff, .min_yc = 0x0, .max_yc = 0x7fff, .rept_size = 8, .read_data = irtouch_read_data, }; #endif /***************************************************************************** * ET&T TC5UH/TC4UM part */ #ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB static int tc45usb_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1]; dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3]; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info tc45usb_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 5, .read_data = tc45usb_read_data, }; #endif /***************************************************************************** * IdealTEK URTC1000 Part */ #ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK #ifndef MULTI_PACKET #define MULTI_PACKET #endif static int idealtek_get_pkt_len(unsigned char *buf, int len) { if (buf[0] & 0x80) return 5; if (buf[0] == 0x01) return len; return 0; } static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { switch (pkt[0] & 0x98) { case 0x88: /* touch data in IdealTEK mode */ dev->x = (pkt[1] << 5) | (pkt[2] >> 2); dev->y = (pkt[3] << 5) | (pkt[4] >> 2); dev->touch = (pkt[0] & 0x40) ? 1 : 0; return 1; case 0x98: /* touch data in MT emulation mode */ dev->x = (pkt[2] << 5) | (pkt[1] >> 2); dev->y = (pkt[4] << 5) | (pkt[3] >> 2); dev->touch = (pkt[0] & 0x40) ? 1 : 0; return 1; default: return 0; } } static const struct usbtouch_device_info idealtek_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 8, .get_pkt_len = idealtek_get_pkt_len, .read_data = idealtek_read_data, }; #endif /***************************************************************************** * General Touch Part */ #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = (pkt[2] << 8) | pkt[1]; dev->y = (pkt[4] << 8) | pkt[3]; dev->press = pkt[5] & 0xff; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info general_touch_dev_info = { .min_xc = 0x0, .max_xc = 0x7fff, .min_yc = 0x0, .max_yc = 0x7fff, .rept_size = 7, .read_data = general_touch_read_data, }; #endif /***************************************************************************** * GoTop Part */ #ifdef CONFIG_TOUCHSCREEN_USB_GOTOP static int gotop_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[1] & 0x38) << 4) | pkt[2]; dev->y = ((pkt[1] & 0x07) << 7) | pkt[3]; dev->touch = pkt[0] & 0x01; return 1; } static const struct usbtouch_device_info gotop_dev_info = { .min_xc = 0x0, .max_xc = 0x03ff, .min_yc = 0x0, .max_yc = 0x03ff, .rept_size = 4, .read_data = gotop_read_data, }; #endif /***************************************************************************** * JASTEC Part */ #ifdef CONFIG_TOUCHSCREEN_USB_JASTEC static int jastec_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = ((pkt[0] & 0x3f) << 6) | (pkt[2] & 0x3f); dev->y = ((pkt[1] & 0x3f) << 6) | (pkt[3] & 0x3f); dev->touch = (pkt[0] & 0x40) >> 6; return 1; } static const struct usbtouch_device_info jastec_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .rept_size = 4, .read_data = 
jastec_read_data, }; #endif /***************************************************************************** * Zytronic Part */ #ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC static int zytronic_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { struct usb_interface *intf = dev->interface; switch (pkt[0]) { case 0x3A: /* command response */ dev_dbg(&intf->dev, "%s: Command response %d\n", __func__, pkt[1]); break; case 0xC0: /* down */ dev->x = (pkt[1] & 0x7f) | ((pkt[2] & 0x07) << 7); dev->y = (pkt[3] & 0x7f) | ((pkt[4] & 0x07) << 7); dev->touch = 1; dev_dbg(&intf->dev, "%s: down %d,%d\n", __func__, dev->x, dev->y); return 1; case 0x80: /* up */ dev->x = (pkt[1] & 0x7f) | ((pkt[2] & 0x07) << 7); dev->y = (pkt[3] & 0x7f) | ((pkt[4] & 0x07) << 7); dev->touch = 0; dev_dbg(&intf->dev, "%s: up %d,%d\n", __func__, dev->x, dev->y); return 1; default: dev_dbg(&intf->dev, "%s: Unknown return %d\n", __func__, pkt[0]); break; } return 0; } static const struct usbtouch_device_info zytronic_dev_info = { .min_xc = 0x0, .max_xc = 0x03ff, .min_yc = 0x0, .max_yc = 0x03ff, .rept_size = 5, .read_data = zytronic_read_data, .irq_always = true, }; #endif /***************************************************************************** * NEXIO Part */ #ifdef CONFIG_TOUCHSCREEN_USB_NEXIO #define NEXIO_TIMEOUT 5000 #define NEXIO_BUFSIZE 1024 #define NEXIO_THRESHOLD 50 struct nexio_priv { struct urb *ack; unsigned char *ack_buf; }; struct nexio_touch_packet { u8 flags; /* 0xe1 = touch, 0xe1 = release */ __be16 data_len; /* total bytes of touch data */ __be16 x_len; /* bytes for X axis */ __be16 y_len; /* bytes for Y axis */ u8 data[]; } __attribute__ ((packed)); static unsigned char nexio_ack_pkt[2] = { 0xaa, 0x02 }; static unsigned char nexio_init_pkt[4] = { 0x82, 0x04, 0x0a, 0x0f }; static void nexio_ack_complete(struct urb *urb) { } static int nexio_alloc(struct usbtouch_usb *usbtouch) { struct nexio_priv *priv; int ret = -ENOMEM; priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto out_buf; usbtouch->priv = priv; priv->ack_buf = kmemdup(nexio_ack_pkt, sizeof(nexio_ack_pkt), GFP_KERNEL); if (!priv->ack_buf) goto err_priv; priv->ack = usb_alloc_urb(0, GFP_KERNEL); if (!priv->ack) { dev_dbg(&usbtouch->interface->dev, "%s - usb_alloc_urb failed: usbtouch->ack\n", __func__); goto err_ack_buf; } return 0; err_ack_buf: kfree(priv->ack_buf); err_priv: kfree(priv); out_buf: return ret; } static int nexio_init(struct usbtouch_usb *usbtouch) { struct usb_device *dev = interface_to_usbdev(usbtouch->interface); struct usb_host_interface *interface = usbtouch->interface->cur_altsetting; struct nexio_priv *priv = usbtouch->priv; int ret = -ENOMEM; int actual_len, i; char *firmware_ver = NULL, *device_name = NULL; int input_ep = 0, output_ep = 0; /* find first input and output endpoint */ for (i = 0; i < interface->desc.bNumEndpoints; i++) { if (!input_ep && usb_endpoint_dir_in(&interface->endpoint[i].desc)) input_ep = interface->endpoint[i].desc.bEndpointAddress; if (!output_ep && usb_endpoint_dir_out(&interface->endpoint[i].desc)) output_ep = interface->endpoint[i].desc.bEndpointAddress; } if (!input_ep || !output_ep) return -ENXIO; u8 *buf __free(kfree) = kmalloc(NEXIO_BUFSIZE, GFP_NOIO); if (!buf) return -ENOMEM; /* two empty reads */ for (i = 0; i < 2; i++) { ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, input_ep), buf, NEXIO_BUFSIZE, &actual_len, NEXIO_TIMEOUT); if (ret < 0) return ret; } /* send init command */ memcpy(buf, nexio_init_pkt, sizeof(nexio_init_pkt)); ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, output_ep), 
buf, sizeof(nexio_init_pkt), &actual_len, NEXIO_TIMEOUT); if (ret < 0) return ret; /* read replies */ for (i = 0; i < 3; i++) { memset(buf, 0, NEXIO_BUFSIZE); ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, input_ep), buf, NEXIO_BUFSIZE, &actual_len, NEXIO_TIMEOUT); if (ret < 0 || actual_len < 1 || buf[1] != actual_len) continue; switch (buf[0]) { case 0x83: /* firmware version */ if (!firmware_ver) firmware_ver = kstrdup(&buf[2], GFP_NOIO); break; case 0x84: /* device name */ if (!device_name) device_name = kstrdup(&buf[2], GFP_NOIO); break; } } printk(KERN_INFO "Nexio device: %s, firmware version: %s\n", device_name, firmware_ver); kfree(firmware_ver); kfree(device_name); usb_fill_bulk_urb(priv->ack, dev, usb_sndbulkpipe(dev, output_ep), priv->ack_buf, sizeof(nexio_ack_pkt), nexio_ack_complete, usbtouch); return 0; } static void nexio_exit(struct usbtouch_usb *usbtouch) { struct nexio_priv *priv = usbtouch->priv; usb_kill_urb(priv->ack); usb_free_urb(priv->ack); kfree(priv->ack_buf); kfree(priv); } static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt) { struct device *dev = &usbtouch->interface->dev; struct nexio_touch_packet *packet = (void *) pkt; struct nexio_priv *priv = usbtouch->priv; unsigned int data_len = be16_to_cpu(packet->data_len); unsigned int x_len = be16_to_cpu(packet->x_len); unsigned int y_len = be16_to_cpu(packet->y_len); int x, y, begin_x, begin_y, end_x, end_y, w, h, ret; /* got touch data? */ if ((pkt[0] & 0xe0) != 0xe0) return 0; if (data_len > 0xff) data_len -= 0x100; if (x_len > 0xff) x_len -= 0x80; /* send ACK */ ret = usb_submit_urb(priv->ack, GFP_ATOMIC); if (ret) dev_warn(dev, "Failed to submit ACK URB: %d\n", ret); if (!input_abs_get_max(usbtouch->input, ABS_X)) { input_set_abs_params(usbtouch->input, ABS_X, 0, 2 * x_len, 0, 0); input_set_abs_params(usbtouch->input, ABS_Y, 0, 2 * y_len, 0, 0); } /* * The device reports state of IR sensors on X and Y axes. * Each byte represents "darkness" percentage (0-100) of one element. * 17" touchscreen reports only 64 x 52 bytes so the resolution is low. * This also means that there's a limited multi-touch capability but * it's disabled (and untested) here as there's no X driver for that. 
*/ begin_x = end_x = begin_y = end_y = -1; for (x = 0; x < x_len; x++) { if (begin_x == -1 && packet->data[x] > NEXIO_THRESHOLD) { begin_x = x; continue; } if (end_x == -1 && begin_x != -1 && packet->data[x] < NEXIO_THRESHOLD) { end_x = x - 1; for (y = x_len; y < data_len; y++) { if (begin_y == -1 && packet->data[y] > NEXIO_THRESHOLD) { begin_y = y - x_len; continue; } if (end_y == -1 && begin_y != -1 && packet->data[y] < NEXIO_THRESHOLD) { end_y = y - 1 - x_len; w = end_x - begin_x; h = end_y - begin_y; #if 0 /* multi-touch */ input_report_abs(usbtouch->input, ABS_MT_TOUCH_MAJOR, max(w,h)); input_report_abs(usbtouch->input, ABS_MT_TOUCH_MINOR, min(x,h)); input_report_abs(usbtouch->input, ABS_MT_POSITION_X, 2*begin_x+w); input_report_abs(usbtouch->input, ABS_MT_POSITION_Y, 2*begin_y+h); input_report_abs(usbtouch->input, ABS_MT_ORIENTATION, w > h); input_mt_sync(usbtouch->input); #endif /* single touch */ usbtouch->x = 2 * begin_x + w; usbtouch->y = 2 * begin_y + h; usbtouch->touch = packet->flags & 0x01; begin_y = end_y = -1; return 1; } } begin_x = end_x = -1; } } return 0; } static const struct usbtouch_device_info nexio_dev_info = { .rept_size = 1024, .irq_always = true, .read_data = nexio_read_data, .alloc = nexio_alloc, .init = nexio_init, .exit = nexio_exit, }; #endif /***************************************************************************** * ELO part */ #ifdef CONFIG_TOUCHSCREEN_USB_ELO static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { dev->x = (pkt[3] << 8) | pkt[2]; dev->y = (pkt[5] << 8) | pkt[4]; dev->touch = pkt[6] > 0; dev->press = pkt[6]; return 1; } static const struct usbtouch_device_info elo_dev_info = { .min_xc = 0x0, .max_xc = 0x0fff, .min_yc = 0x0, .max_yc = 0x0fff, .max_press = 0xff, .rept_size = 8, .read_data = elo_read_data, }; #endif /***************************************************************************** * Generic Part */ static void usbtouch_process_pkt(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len) { const struct usbtouch_device_info *type = usbtouch->type; if (!type->read_data(usbtouch, pkt)) return; input_report_key(usbtouch->input, BTN_TOUCH, usbtouch->touch); if (swap_xy) { input_report_abs(usbtouch->input, ABS_X, usbtouch->y); input_report_abs(usbtouch->input, ABS_Y, usbtouch->x); } else { input_report_abs(usbtouch->input, ABS_X, usbtouch->x); input_report_abs(usbtouch->input, ABS_Y, usbtouch->y); } if (type->max_press) input_report_abs(usbtouch->input, ABS_PRESSURE, usbtouch->press); input_sync(usbtouch->input); } #ifdef MULTI_PACKET static void usbtouch_process_multi(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len) { unsigned char *buffer; int pkt_len, pos, buf_len, tmp; /* process buffer */ if (unlikely(usbtouch->buf_len)) { /* try to get size */ pkt_len = usbtouch->type->get_pkt_len( usbtouch->buffer, usbtouch->buf_len); /* drop? 
*/ if (unlikely(!pkt_len)) goto out_flush_buf; /* need to append -pkt_len bytes before able to get size */ if (unlikely(pkt_len < 0)) { int append = -pkt_len; if (unlikely(append > len)) append = len; if (usbtouch->buf_len + append >= usbtouch->type->rept_size) goto out_flush_buf; memcpy(usbtouch->buffer + usbtouch->buf_len, pkt, append); usbtouch->buf_len += append; pkt_len = usbtouch->type->get_pkt_len( usbtouch->buffer, usbtouch->buf_len); if (pkt_len < 0) return; } /* append */ tmp = pkt_len - usbtouch->buf_len; if (usbtouch->buf_len + tmp >= usbtouch->type->rept_size) goto out_flush_buf; memcpy(usbtouch->buffer + usbtouch->buf_len, pkt, tmp); usbtouch_process_pkt(usbtouch, usbtouch->buffer, pkt_len); buffer = pkt + tmp; buf_len = len - tmp; } else { buffer = pkt; buf_len = len; } /* loop over the received packet, process */ pos = 0; while (pos < buf_len) { /* get packet len */ pkt_len = usbtouch->type->get_pkt_len(buffer + pos, buf_len - pos); /* unknown packet: skip one byte */ if (unlikely(!pkt_len)) { pos++; continue; } /* full packet: process */ if (likely((pkt_len > 0) && (pkt_len <= buf_len - pos))) { usbtouch_process_pkt(usbtouch, buffer + pos, pkt_len); } else { /* incomplete packet: save in buffer */ memcpy(usbtouch->buffer, buffer + pos, buf_len - pos); usbtouch->buf_len = buf_len - pos; return; } pos += pkt_len; } out_flush_buf: usbtouch->buf_len = 0; return; } #else static void usbtouch_process_multi(struct usbtouch_usb *usbtouch, unsigned char *pkt, int len) { dev_WARN_ONCE(&usbtouch->interface->dev, 1, "Protocol has ->get_pkt_len() without #define MULTI_PACKET"); } #endif static void usbtouch_irq(struct urb *urb) { struct usbtouch_usb *usbtouch = urb->context; struct device *dev = &usbtouch->interface->dev; int retval; switch (urb->status) { case 0: /* success */ break; case -ETIME: /* this urb is timing out */ dev_dbg(dev, "%s - urb timed out - was the device unplugged?\n", __func__); return; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -EPIPE: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, urb->status); return; default: dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, urb->status); goto exit; } usbtouch->process_pkt(usbtouch, usbtouch->data, urb->actual_length); exit: usb_mark_last_busy(interface_to_usbdev(usbtouch->interface)); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "%s - usb_submit_urb failed with result: %d\n", __func__, retval); } static int usbtouch_start_io(struct usbtouch_usb *usbtouch) { guard(mutex)(&usbtouch->pm_mutex); if (!usbtouch->type->irq_always) if (usb_submit_urb(usbtouch->irq, GFP_KERNEL)) return -EIO; usbtouch->interface->needs_remote_wakeup = 1; usbtouch->is_open = true; return 0; } static int usbtouch_open(struct input_dev *input) { struct usbtouch_usb *usbtouch = input_get_drvdata(input); int r; usbtouch->irq->dev = interface_to_usbdev(usbtouch->interface); r = usb_autopm_get_interface(usbtouch->interface) ? 
-EIO : 0; if (r) return r; r = usbtouch_start_io(usbtouch); usb_autopm_put_interface(usbtouch->interface); return r; } static void usbtouch_close(struct input_dev *input) { struct usbtouch_usb *usbtouch = input_get_drvdata(input); int r; scoped_guard(mutex, &usbtouch->pm_mutex) { if (!usbtouch->type->irq_always) usb_kill_urb(usbtouch->irq); usbtouch->is_open = false; } r = usb_autopm_get_interface(usbtouch->interface); usbtouch->interface->needs_remote_wakeup = 0; if (!r) usb_autopm_put_interface(usbtouch->interface); } static int usbtouch_suspend(struct usb_interface *intf, pm_message_t message) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); usb_kill_urb(usbtouch->irq); return 0; } static int usbtouch_resume(struct usb_interface *intf) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); guard(mutex)(&usbtouch->pm_mutex); if (usbtouch->is_open || usbtouch->type->irq_always) return usb_submit_urb(usbtouch->irq, GFP_NOIO); return 0; } static int usbtouch_reset_resume(struct usb_interface *intf) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); int err; /* reinit the device */ if (usbtouch->type->init) { err = usbtouch->type->init(usbtouch); if (err) { dev_dbg(&intf->dev, "%s - type->init() failed, err: %d\n", __func__, err); return err; } } /* restart IO if needed */ guard(mutex)(&usbtouch->pm_mutex); if (usbtouch->is_open) return usb_submit_urb(usbtouch->irq, GFP_NOIO); return 0; } static void usbtouch_free_buffers(struct usb_device *udev, struct usbtouch_usb *usbtouch) { usb_free_coherent(udev, usbtouch->data_size, usbtouch->data, usbtouch->data_dma); kfree(usbtouch->buffer); } static struct usb_endpoint_descriptor * usbtouch_get_input_endpoint(struct usb_host_interface *interface) { int i; for (i = 0; i < interface->desc.bNumEndpoints; i++) if (usb_endpoint_dir_in(&interface->endpoint[i].desc)) return &interface->endpoint[i].desc; return NULL; } static int usbtouch_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usbtouch_usb *usbtouch; struct input_dev *input_dev; struct usb_endpoint_descriptor *endpoint; struct usb_device *udev = interface_to_usbdev(intf); const struct usbtouch_device_info *type; int err = -ENOMEM; /* some devices are ignored */ type = (const struct usbtouch_device_info *)id->driver_info; if (!type) return -ENODEV; endpoint = usbtouch_get_input_endpoint(intf->cur_altsetting); if (!endpoint) return -ENXIO; usbtouch = kzalloc(sizeof(*usbtouch), GFP_KERNEL); input_dev = input_allocate_device(); if (!usbtouch || !input_dev) goto out_free; mutex_init(&usbtouch->pm_mutex); usbtouch->type = type; usbtouch->data_size = type->rept_size; if (type->get_pkt_len) { /* * When dealing with variable-length packets we should * not request more than wMaxPacketSize bytes at once * as we do not know if there is more data coming or * we filled exactly wMaxPacketSize bytes and there is * nothing else. 
*/ usbtouch->data_size = min(usbtouch->data_size, usb_endpoint_maxp(endpoint)); } usbtouch->data = usb_alloc_coherent(udev, usbtouch->data_size, GFP_KERNEL, &usbtouch->data_dma); if (!usbtouch->data) goto out_free; if (type->get_pkt_len) { usbtouch->buffer = kmalloc(type->rept_size, GFP_KERNEL); if (!usbtouch->buffer) goto out_free_buffers; usbtouch->process_pkt = usbtouch_process_multi; } else { usbtouch->process_pkt = usbtouch_process_pkt; } usbtouch->irq = usb_alloc_urb(0, GFP_KERNEL); if (!usbtouch->irq) { dev_dbg(&intf->dev, "%s - usb_alloc_urb failed: usbtouch->irq\n", __func__); goto out_free_buffers; } usbtouch->interface = intf; usbtouch->input = input_dev; if (udev->manufacturer) strscpy(usbtouch->name, udev->manufacturer, sizeof(usbtouch->name)); if (udev->product) { if (udev->manufacturer) strlcat(usbtouch->name, " ", sizeof(usbtouch->name)); strlcat(usbtouch->name, udev->product, sizeof(usbtouch->name)); } if (!strlen(usbtouch->name)) snprintf(usbtouch->name, sizeof(usbtouch->name), "USB Touchscreen %04x:%04x", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); usb_make_path(udev, usbtouch->phys, sizeof(usbtouch->phys)); strlcat(usbtouch->phys, "/input0", sizeof(usbtouch->phys)); input_dev->name = usbtouch->name; input_dev->phys = usbtouch->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, usbtouch); input_dev->open = usbtouch_open; input_dev->close = usbtouch_close; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, type->min_xc, type->max_xc, 0, 0); input_set_abs_params(input_dev, ABS_Y, type->min_yc, type->max_yc, 0, 0); if (type->max_press) input_set_abs_params(input_dev, ABS_PRESSURE, type->min_press, type->max_press, 0, 0); if (usb_endpoint_type(endpoint) == USB_ENDPOINT_XFER_INT) usb_fill_int_urb(usbtouch->irq, udev, usb_rcvintpipe(udev, endpoint->bEndpointAddress), usbtouch->data, usbtouch->data_size, usbtouch_irq, usbtouch, endpoint->bInterval); else usb_fill_bulk_urb(usbtouch->irq, udev, usb_rcvbulkpipe(udev, endpoint->bEndpointAddress), usbtouch->data, usbtouch->data_size, usbtouch_irq, usbtouch); usbtouch->irq->dev = udev; usbtouch->irq->transfer_dma = usbtouch->data_dma; usbtouch->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* device specific allocations */ if (type->alloc) { err = type->alloc(usbtouch); if (err) { dev_dbg(&intf->dev, "%s - type->alloc() failed, err: %d\n", __func__, err); goto out_free_urb; } } /* device specific initialisation*/ if (type->init) { err = type->init(usbtouch); if (err) { dev_dbg(&intf->dev, "%s - type->init() failed, err: %d\n", __func__, err); goto out_do_exit; } } err = input_register_device(usbtouch->input); if (err) { dev_dbg(&intf->dev, "%s - input_register_device failed, err: %d\n", __func__, err); goto out_do_exit; } usb_set_intfdata(intf, usbtouch); if (usbtouch->type->irq_always) { /* this can't fail */ usb_autopm_get_interface(intf); err = usb_submit_urb(usbtouch->irq, GFP_KERNEL); if (err) { usb_autopm_put_interface(intf); dev_err(&intf->dev, "%s - usb_submit_urb failed with result: %d\n", __func__, err); goto out_unregister_input; } } return 0; out_unregister_input: input_unregister_device(input_dev); input_dev = NULL; out_do_exit: if (type->exit) type->exit(usbtouch); out_free_urb: usb_free_urb(usbtouch->irq); out_free_buffers: usbtouch_free_buffers(udev, usbtouch); out_free: input_free_device(input_dev); 
kfree(usbtouch); return err; } static void usbtouch_disconnect(struct usb_interface *intf) { struct usbtouch_usb *usbtouch = usb_get_intfdata(intf); if (!usbtouch) return; dev_dbg(&intf->dev, "%s - usbtouch is initialized, cleaning up\n", __func__); usb_set_intfdata(intf, NULL); /* this will stop IO via close */ input_unregister_device(usbtouch->input); usb_free_urb(usbtouch->irq); if (usbtouch->type->exit) usbtouch->type->exit(usbtouch); usbtouch_free_buffers(interface_to_usbdev(intf), usbtouch); kfree(usbtouch); } static const struct attribute_group *usbtouch_groups[] = { #ifdef CONFIG_TOUCHSCREEN_USB_3M &mtouch_attr_group, #endif NULL }; static const struct usb_device_id usbtouch_devices[] = { #ifdef CONFIG_TOUCHSCREEN_USB_EGALAX /* ignore the HID capable devices, handled by usbhid */ { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0001, USB_INTERFACE_CLASS_HID), .driver_info = 0 }, { USB_DEVICE_INTERFACE_CLASS(0x0eef, 0x0002, USB_INTERFACE_CLASS_HID), .driver_info = 0 }, /* normal device IDs */ { USB_DEVICE(0x3823, 0x0001), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x3823, 0x0002), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x0123, 0x0001), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x0eef, 0x0001), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x0eef, 0x0002), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x1234, 0x0001), .driver_info = (kernel_ulong_t)&egalax_dev_info }, { USB_DEVICE(0x1234, 0x0002), .driver_info = (kernel_ulong_t)&egalax_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_PANJIT { USB_DEVICE(0x134c, 0x0001), .driver_info = (kernel_ulong_t)&panjit_dev_info }, { USB_DEVICE(0x134c, 0x0002), .driver_info = (kernel_ulong_t)&panjit_dev_info }, { USB_DEVICE(0x134c, 0x0003), .driver_info = (kernel_ulong_t)&panjit_dev_info }, { USB_DEVICE(0x134c, 0x0004), .driver_info = (kernel_ulong_t)&panjit_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_3M { USB_DEVICE(0x0596, 0x0001), .driver_info = (kernel_ulong_t)&mtouch_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ITM { USB_DEVICE(0x0403, 0xf9e9), .driver_info = (kernel_ulong_t)&itm_dev_info }, { USB_DEVICE(0x16e3, 0xf9e9), .driver_info = (kernel_ulong_t)&itm_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ETURBO { USB_DEVICE(0x1234, 0x5678), .driver_info = (kernel_ulong_t)&eturbo_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GUNZE { USB_DEVICE(0x0637, 0x0001), .driver_info = (kernel_ulong_t)&gunze_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_DMC_TSC10 { USB_DEVICE(0x0afa, 0x03e8), .driver_info = (kernel_ulong_t)&dmc_tsc10_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH { USB_DEVICE(0x255e, 0x0001), .driver_info = (kernel_ulong_t)&irtouch_dev_info }, { USB_DEVICE(0x595a, 0x0001), .driver_info = (kernel_ulong_t)&irtouch_dev_info }, { USB_DEVICE(0x6615, 0x0001), .driver_info = (kernel_ulong_t)&irtouch_dev_info }, { USB_DEVICE(0x6615, 0x0012), .driver_info = (kernel_ulong_t)&irtouch_hires_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_IDEALTEK { USB_DEVICE(0x1391, 0x1000), .driver_info = (kernel_ulong_t)&idealtek_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH { USB_DEVICE(0x0dfc, 0x0001), .driver_info = (kernel_ulong_t)&general_touch_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_GOTOP { USB_DEVICE(0x08f2, 0x007f), .driver_info = (kernel_ulong_t)&gotop_dev_info }, { USB_DEVICE(0x08f2, 0x00ce), .driver_info = (kernel_ulong_t)&gotop_dev_info }, { USB_DEVICE(0x08f2, 0x00f4), .driver_info = 
(kernel_ulong_t)&gotop_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_JASTEC { USB_DEVICE(0x0f92, 0x0001), .driver_info = (kernel_ulong_t)&jastec_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_E2I { USB_DEVICE(0x1ac7, 0x0001), .driver_info = (kernel_ulong_t)&e2i_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ZYTRONIC { USB_DEVICE(0x14c8, 0x0003), .driver_info = (kernel_ulong_t)&zytronic_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ETT_TC45USB /* TC5UH */ { USB_DEVICE(0x0664, 0x0309), .driver_info = (kernel_ulong_t)&tc45usb_dev_info }, /* TC4UM */ { USB_DEVICE(0x0664, 0x0306), .driver_info = (kernel_ulong_t)&tc45usb_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_NEXIO /* data interface only */ { USB_DEVICE_AND_INTERFACE_INFO(0x10f0, 0x2002, 0x0a, 0x00, 0x00), .driver_info = (kernel_ulong_t)&nexio_dev_info }, { USB_DEVICE_AND_INTERFACE_INFO(0x1870, 0x0001, 0x0a, 0x00, 0x00), .driver_info = (kernel_ulong_t)&nexio_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ELO { USB_DEVICE(0x04e7, 0x0020), .driver_info = (kernel_ulong_t)&elo_dev_info }, #endif #ifdef CONFIG_TOUCHSCREEN_USB_EASYTOUCH { USB_DEVICE(0x7374, 0x0001), .driver_info = (kernel_ulong_t)&etouch_dev_info }, #endif { } }; MODULE_DEVICE_TABLE(usb, usbtouch_devices); static struct usb_driver usbtouch_driver = { .name = "usbtouchscreen", .probe = usbtouch_probe, .disconnect = usbtouch_disconnect, .suspend = usbtouch_suspend, .resume = usbtouch_resume, .reset_resume = usbtouch_reset_resume, .id_table = usbtouch_devices, .dev_groups = usbtouch_groups, .supports_autosuspend = 1, }; module_usb_driver(usbtouch_driver); MODULE_AUTHOR("Daniel Ritz <daniel.ritz@gmx.ch>"); MODULE_DESCRIPTION("USB Touchscreen Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("touchkitusb"); MODULE_ALIAS("itmtouch"); MODULE_ALIAS("mtouchusb");
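As a hedged aside, the following stand-alone sketch models the ->get_pkt_len() return convention documented in struct usbtouch_device_info (> 0: packet length, 0: skip one byte, < 0: that many more bytes needed) and consumed by usbtouch_process_multi() above. demo_get_pkt_len(), the frame layout, and the sample byte stream are invented purely for illustration; they are not part of the driver, though the 0x80/0x0A framing loosely mirrors the eGalax packets handled earlier.

/*
 * Hypothetical user-space model of the ->get_pkt_len() contract.
 * Not driver code; for illustration only.
 */
#include <stdio.h>

#define PKT_TYPE_MASK 0xFE
#define PKT_TYPE_REPT 0x80	/* fixed-size touch report */
#define PKT_TYPE_DIAG 0x0A	/* variable-size diagnostic packet */

/* Same return convention as usbtouch_device_info->get_pkt_len(). */
static int demo_get_pkt_len(const unsigned char *buf, int len)
{
	switch (buf[0] & PKT_TYPE_MASK) {
	case PKT_TYPE_REPT:
		return 5;		/* full report is always 5 bytes */
	case PKT_TYPE_DIAG:
		if (len < 2)
			return -1;	/* need one more byte (the length field) */
		return buf[1] + 2;	/* type + length byte + payload */
	}
	return 0;			/* unknown byte: caller skips it */
}

int main(void)
{
	/* one report, one stray byte, one 3-byte diagnostic packet */
	static const unsigned char stream[] = {
		0x80, 0x01, 0x02, 0x03, 0x04,	/* report */
		0x55,				/* garbage, skipped */
		0x0A, 0x01, 'A',		/* diagnostic, payload length 1 */
	};
	int pos = 0, n = (int)sizeof(stream);

	while (pos < n) {
		int pkt_len = demo_get_pkt_len(stream + pos, n - pos);

		if (pkt_len == 0) {		/* unknown byte: skip it */
			pos++;
			continue;
		}
		if (pkt_len < 0 || pkt_len > n - pos) {
			/* the real driver buffers the tail in ->buffer here */
			printf("incomplete packet at %d, buffering\n", pos);
			break;
		}
		printf("packet at %d, %d bytes\n", pos, pkt_len);
		pos += pkt_len;
	}
	return 0;
}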
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
* * Address space accounting code <alan@lxorguk.ukuu.org.uk> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/capability.h> #include <linux/init.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/shmem_fs.h> #include <linux/profile.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/mmdebug.h> #include <linux/perf_event.h> #include <linux/audit.h> #include <linux/khugepaged.h> #include <linux/uprobes.h> #include <linux/notifier.h> #include <linux/memory.h> #include <linux/printk.h> #include <linux/userfaultfd_k.h> #include <linux/moduleparam.h> #include <linux/pkeys.h> #include <linux/oom.h> #include <linux/sched/mm.h> #include <linux/ksm.h> #include <linux/memfd.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #define CREATE_TRACE_POINTS #include <trace/events/mmap.h> #include "internal.h" #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX; int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif static bool ignore_rlimit_data; core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); /* Update vma->vm_page_prot to reflect vma->vm_flags. */ void vma_set_page_prot(struct vm_area_struct *vma) { unsigned long vm_flags = vma->vm_flags; pgprot_t vm_page_prot; vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); if (vma_wants_writenotify(vma, vm_page_prot)) { vm_flags &= ~VM_SHARED; vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); } /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ WRITE_ONCE(vma->vm_page_prot, vm_page_prot); } /* * check_brk_limits() - Use platform specific check of range & verify mlock * limits. * @addr: The address to check * @len: The size of increase. * * Return: 0 on success. */ static int check_brk_limits(unsigned long addr, unsigned long len) { unsigned long mapped_addr; mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (IS_ERR_VALUE(mapped_addr)) return mapped_addr; return mlock_future_ok(current->mm, current->mm->def_flags, len) ? 
0 : -EAGAIN; } SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long newbrk, oldbrk, origbrk; struct mm_struct *mm = current->mm; struct vm_area_struct *brkvma, *next = NULL; unsigned long min_brk; bool populate = false; LIST_HEAD(uf); struct vma_iterator vmi; if (mmap_write_lock_killable(mm)) return -EINTR; origbrk = mm->brk; #ifdef CONFIG_COMPAT_BRK /* * CONFIG_COMPAT_BRK can still be overridden by setting * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ if (current->brk_randomized) min_brk = mm->start_brk; else min_brk = mm->end_data; #else min_brk = mm->start_brk; #endif if (brk < min_brk) goto out; /* * Check against rlimit here. If this check is done later after the test * of oldbrk with newbrk then it can escape the test and let the data * segment grow beyond its set limit the in case where the limit is * not page aligned -Ram Gupta */ if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, mm->end_data, mm->start_data)) goto out; newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); if (oldbrk == newbrk) { mm->brk = brk; goto success; } /* Always allow shrinking brk. */ if (brk <= mm->brk) { /* Search one past newbrk */ vma_iter_init(&vmi, mm, newbrk); brkvma = vma_find(&vmi, oldbrk); if (!brkvma || brkvma->vm_start >= oldbrk) goto out; /* mapping intersects with an existing non-brk vma. */ /* * mm->brk must be protected by write mmap_lock. * do_vmi_align_munmap() will drop the lock on success, so * update it before calling do_vma_munmap(). */ mm->brk = brk; if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf, /* unlock = */ true)) goto out; goto success_unlocked; } if (check_brk_limits(oldbrk, newbrk - oldbrk)) goto out; /* * Only check if the next VMA is within the stack_guard_gap of the * expansion area */ vma_iter_init(&vmi, mm, oldbrk); next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap); if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; brkvma = vma_prev_limit(&vmi, mm->start_brk); /* Ok, looks good - let it rip. */ if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) goto out; mm->brk = brk; if (mm->def_flags & VM_LOCKED) populate = true; success: mmap_write_unlock(mm); success_unlocked: userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; out: mm->brk = origbrk; mmap_write_unlock(mm); return origbrk; } /* * If a hint addr is less than mmap_min_addr change hint to be as * low as possible but still greater than mmap_min_addr */ static inline unsigned long round_hint_to_min(unsigned long hint) { hint &= PAGE_MASK; if (((void *)hint != NULL) && (hint < mmap_min_addr)) return PAGE_ALIGN(mmap_min_addr); return hint; } bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes) { unsigned long locked_pages, limit_pages; if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) return true; locked_pages = bytes >> PAGE_SHIFT; locked_pages += mm->locked_vm; limit_pages = rlimit(RLIMIT_MEMLOCK); limit_pages >>= PAGE_SHIFT; return locked_pages <= limit_pages; } static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) { if (S_ISREG(inode->i_mode)) return MAX_LFS_FILESIZE; if (S_ISBLK(inode->i_mode)) return MAX_LFS_FILESIZE; if (S_ISSOCK(inode->i_mode)) return MAX_LFS_FILESIZE; /* Special "we do even unsigned file positions" case */ if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET) return 0; /* Yes, random drivers might want more. 
But I'm tired of buggy drivers */ return ULONG_MAX; } static inline bool file_mmap_ok(struct file *file, struct inode *inode, unsigned long pgoff, unsigned long len) { u64 maxsize = file_mmap_size_max(file, inode); if (maxsize && len > maxsize) return false; maxsize -= len; if (pgoff > maxsize >> PAGE_SHIFT) return false; return true; } /** * do_mmap() - Perform a userland memory mapping into the current process * address space of length @len with protection bits @prot, mmap flags @flags * (from which VMA flags will be inferred), and any additional VMA flags to * apply @vm_flags. If this is a file-backed mapping then the file is specified * in @file and page offset into the file via @pgoff. * * This function does not perform security checks on the file and assumes, if * @uf is non-NULL, the caller has provided a list head to track unmap events * for userfaultfd @uf. * * It also simply indicates whether memory population is required by setting * @populate, which must be non-NULL, expecting the caller to actually perform * this task itself if appropriate. * * This function will invoke architecture-specific (and if provided and * relevant, file system-specific) logic to determine the most appropriate * unmapped area in which to place the mapping if not MAP_FIXED. * * Callers which require userland mmap() behaviour should invoke vm_mmap(), * which is also exported for module use. * * Those which require this behaviour less security checks, userfaultfd and * populate behaviour, and who handle the mmap write lock themselves, should * call this function. * * Note that the returned address may reside within a merged VMA if an * appropriate merge were to take place, so it doesn't necessarily specify the * start of a VMA, rather only the start of a valid mapped range of length * @len bytes, rounded down to the nearest page size. * * The caller must write-lock current->mm->mmap_lock. * * @file: An optional struct file pointer describing the file which is to be * mapped, if a file-backed mapping. * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the * address at which to perform this mapping. See mmap (2) for details. Must be * page-aligned. * @len: The length of the mapping. Will be page-aligned and must be at least 1 * page in size. * @prot: Protection bits describing access required to the mapping. See mmap * (2) for details. * @flags: Flags specifying how the mapping should be performed, see mmap (2) * for details. * @vm_flags: VMA flags which should be set by default, or 0 otherwise. * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise. * @populate: A pointer to a value which will be set to 0 if no population of * the range is required, or the number of bytes to populate if it is. Must be * non-NULL. See mmap (2) for details as to under what circumstances population * of the range occurs. * @uf: An optional pointer to a list head to track userfaultfd unmap events * should unmapping events arise. If provided, it is up to the caller to manage * this. * * Returns: Either an error, or the address at which the requested mapping has * been performed. */ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf) { struct mm_struct *mm = current->mm; int pkey = 0; *populate = 0; mmap_assert_write_locked(mm); if (!len) return -EINVAL; /* * Does the application expect PROT_READ to imply PROT_EXEC? 
* * (the exception is when the underlying filesystem is noexec * mounted, in which case we don't add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) if (!(file && path_noexec(&file->f_path))) prot |= PROT_EXEC; /* force arch specific MAP_FIXED handling in get_unmapped_area */ if (flags & MAP_FIXED_NOREPLACE) flags |= MAP_FIXED; if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len) return -ENOMEM; /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; /* * addr is returned from get_unmapped_area, * There are two cases: * 1> MAP_FIXED == false * unallocated memory, no need to check sealing. * 1> MAP_FIXED == true * sealing is checked inside mmap_region when * do_vmi_munmap is called. */ if (prot == PROT_EXEC) { pkey = execute_only_pkey(mm); if (pkey < 0) pkey = 0; } /* Do simple checking here so the lower-level routines won't have * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. */ addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); if (IS_ERR_VALUE(addr)) return addr; if (flags & MAP_FIXED_NOREPLACE) { if (find_vma_intersection(mm, addr, addr + len)) return -EEXIST; } if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; if (!mlock_future_ok(mm, vm_flags, len)) return -EAGAIN; if (file) { struct inode *inode = file_inode(file); unsigned long flags_mask; int err; if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; flags_mask = LEGACY_MAP_MASK; if (file->f_op->fop_flags & FOP_MMAP_SYNC) flags_mask |= MAP_SYNC; switch (flags & MAP_TYPE) { case MAP_SHARED: /* * Force use of MAP_SHARED_VALIDATE with non-legacy * flags. E.g. MAP_SYNC is dangerous to use with * MAP_SHARED as you don't know which consistency model * you will get. We silently ignore unsupported flags * with MAP_SHARED to preserve backward compatibility. */ flags &= LEGACY_MAP_MASK; fallthrough; case MAP_SHARED_VALIDATE: if (flags & ~flags_mask) return -EOPNOTSUPP; if (prot & PROT_WRITE) { if (!(file->f_mode & FMODE_WRITE)) return -EACCES; if (IS_SWAPFILE(file->f_mapping->host)) return -ETXTBSY; } /* * Make sure we don't allow writing to an append-only * file.. */ if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); fallthrough; case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; if (path_noexec(&file->f_path)) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; } if (!file->f_op->mmap) return -ENODEV; if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; break; default: return -EINVAL; } /* * Check to see if we are violating any seals and update VMA * flags if necessary to avoid future seal violations. */ err = memfd_check_seals_mmap(file, &vm_flags); if (err) return (unsigned long)err; } else { switch (flags & MAP_TYPE) { case MAP_SHARED: if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; /* * Ignore pgoff. 
*/ pgoff = 0; vm_flags |= VM_SHARED | VM_MAYSHARE; break; case MAP_DROPPABLE: if (VM_DROPPABLE == VM_NONE) return -ENOTSUPP; /* * A locked or stack area makes no sense to be droppable. * * Also, since droppable pages can just go away at any time * it makes no sense to copy them on fork or dump them. * * And don't attempt to combine with hugetlb for now. */ if (flags & (MAP_LOCKED | MAP_HUGETLB)) return -EINVAL; if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) return -EINVAL; vm_flags |= VM_DROPPABLE; /* * If the pages can be dropped, then it doesn't make * sense to reserve them. */ vm_flags |= VM_NORESERVE; /* * Likewise, they're volatile enough that they * shouldn't survive forks or coredumps. */ vm_flags |= VM_WIPEONFORK | VM_DONTDUMP; fallthrough; case MAP_PRIVATE: /* * Set pgoff according to addr for anon_vma. */ pgoff = addr >> PAGE_SHIFT; break; default: return -EINVAL; } } /* * Set 'VM_NORESERVE' if we should not account for the * memory use of this mapping. */ if (flags & MAP_NORESERVE) { /* We honor MAP_NORESERVE if allowed to overcommit */ if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ if (file && is_file_hugepages(file)) vm_flags |= VM_NORESERVE; } addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; return addr; } unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { struct file *file = NULL; unsigned long retval; if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); if (!file) return -EBADF; if (is_file_hugepages(file)) { len = ALIGN(len, huge_page_size(hstate_file(file))); } else if (unlikely(flags & MAP_HUGETLB)) { retval = -EINVAL; goto out_fput; } } else if (flags & MAP_HUGETLB) { struct hstate *hs; hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (!hs) return -EINVAL; len = ALIGN(len, huge_page_size(hs)); /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called */ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (IS_ERR(file)) return PTR_ERR(file); } retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_fput: if (file) fput(file); return retval; } SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } #ifdef __ARCH_WANT_SYS_OLD_MMAP struct mmap_arg_struct { unsigned long addr; unsigned long len; unsigned long prot; unsigned long flags; unsigned long fd; unsigned long offset; }; SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) { struct mmap_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if (offset_in_page(a.offset)) return -EINVAL; return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ /* * Determine if the allocation needs to ensure that there is no * existing mapping within it's guard gaps, for use as start_gap. */ static inline unsigned long stack_guard_placement(vm_flags_t vm_flags) { if (vm_flags & VM_SHADOW_STACK) return PAGE_SIZE; return 0; } /* * Search for an unmapped address range. 
* * We are looking for a range that: * - does not intersect with any VMA; * - is contained within the [low_limit, high_limit) interval; * - is at least the desired size. * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) */ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) { unsigned long addr; if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) addr = unmapped_area_topdown(info); else addr = unmapped_area(info); trace_vm_unmapped_area(addr, info); return addr; } /* Get an address range which is currently unmapped. * For shmat() with addr=0. * * Ugly calling convention alert: * Return value with the low bits set means error value, * ie * if (ret & ~PAGE_MASK) * error = ret; * * This function "knows" that -ENOMEM has the bits set. */ unsigned long generic_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info = {}; const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.length = len; info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.start_gap = stack_guard_placement(vm_flags); if (filp && is_file_hugepages(filp)) info.align_mask = huge_page_mask_align(filp); return vm_unmapped_area(&info); } #ifndef HAVE_ARCH_UNMAPPED_AREA unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { return generic_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); } #endif /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): */ unsigned long generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info = {}; const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); /* requested length too big for entire address space */ if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.start_gap = stack_guard_placement(vm_flags); if (filp && is_file_hugepages(filp)) info.align_mask = huge_page_mask_align(filp); addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. 
*/ if (offset_in_page(addr)) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = mmap_end; addr = vm_unmapped_area(&info); } return addr; } #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags, vm_flags); } #endif unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { if (test_bit(MMF_TOPDOWN, &mm->flags)) return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags, vm_flags); return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags); } unsigned long __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) = NULL; unsigned long error = arch_mmap_check(addr, len, flags); if (error) return error; /* Careful about overflows.. */ if (len > TASK_SIZE) return -ENOMEM; if (file) { if (file->f_op->get_unmapped_area) get_area = file->f_op->get_unmapped_area; } else if (flags & MAP_SHARED) { /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge. */ get_area = shmem_get_unmapped_area; } /* Always treat pgoff as zero for anonymous memory. */ if (!file) pgoff = 0; if (get_area) { addr = get_area(file, addr, len, pgoff, flags); } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file && !addr /* no hint */ && IS_ALIGNED(len, PMD_SIZE)) { /* Ensures that larger anonymous mappings are THP aligned. */ addr = thp_get_unmapped_area_vmflags(file, addr, len, pgoff, flags, vm_flags); } else { addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, pgoff, flags, vm_flags); } if (IS_ERR_VALUE(addr)) return addr; if (addr > TASK_SIZE - len) return -ENOMEM; if (offset_in_page(addr)) return -EINVAL; error = security_mmap_addr(addr); return error ? error : addr; } unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { if (test_bit(MMF_TOPDOWN, &mm->flags)) return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0); return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0); } EXPORT_SYMBOL(mm_get_unmapped_area); /** * find_vma_intersection() - Look up the first VMA which intersects the interval * @mm: The process address space. * @start_addr: The inclusive start user address. * @end_addr: The exclusive end user address. * * Returns: The first VMA within the provided range, %NULL otherwise. Assumes * start_addr < end_addr. */ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, unsigned long start_addr, unsigned long end_addr) { unsigned long index = start_addr; mmap_assert_locked(mm); return mt_find(&mm->mm_mt, &index, end_addr - 1); } EXPORT_SYMBOL(find_vma_intersection); /** * find_vma() - Find the VMA for a given address, or the next VMA. * @mm: The mm_struct to check * @addr: The address * * Returns: The VMA associated with addr, or the next VMA. * May return %NULL in the case of no VMA at addr or above. 
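 *
 * A minimal usage sketch, assuming the caller already holds the mmap lock
 * (the function asserts this via mmap_assert_locked()); the explicit
 * vm_start check matters because the returned VMA may start above the
 * requested address. Local names here are illustrative only:
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		addr_is_mapped = true;
 *	mmap_read_unlock(mm);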
*/ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { unsigned long index = addr; mmap_assert_locked(mm); return mt_find(&mm->mm_mt, &index, ULONG_MAX); } EXPORT_SYMBOL(find_vma); /** * find_vma_prev() - Find the VMA for a given address, or the next vma and * set %pprev to the previous VMA, if any. * @mm: The mm_struct to check * @addr: The address * @pprev: The pointer to set to the previous VMA * * Note that RCU lock is missing here since the external mmap_lock() is used * instead. * * Returns: The VMA associated with @addr, or the next vma. * May return %NULL in the case of no vma at addr or above. */ struct vm_area_struct * find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, addr); vma = vma_iter_load(&vmi); *pprev = vma_prev(&vmi); if (!vma) vma = vma_next(&vmi); return vma; } /* enforced gap between the expanding stack and other mappings. */ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; static int __init cmdline_parse_stack_guard_gap(char *p) { unsigned long val; char *endptr; val = simple_strtoul(p, &endptr, 10); if (!*endptr) stack_guard_gap = val << PAGE_SHIFT; return 1; } __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { return expand_upwards(vma, address); } struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; addr &= PAGE_MASK; vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; if (!prev) return NULL; if (expand_stack_locked(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { return expand_downwards(vma, address); } struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long start; addr &= PAGE_MASK; vma = find_vma(mm, addr); if (!vma) return NULL; if (vma->vm_start <= addr) return vma; start = vma->vm_start; if (expand_stack_locked(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); return vma; } #endif #if defined(CONFIG_STACK_GROWSUP) #define vma_expand_up(vma,addr) expand_upwards(vma, addr) #define vma_expand_down(vma, addr) (-EFAULT) #else #define vma_expand_up(vma,addr) (-EFAULT) #define vma_expand_down(vma, addr) expand_downwards(vma, addr) #endif /* * expand_stack(): legacy interface for page faulting. Don't use unless * you have to. * * This is called with the mm locked for reading, drops the lock, takes * the lock for writing, tries to look up a vma again, expands it if * necessary, and downgrades the lock to reading again. * * If no vma is found or it can't be expanded, it returns NULL and has * dropped the lock. 
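 *
 * A rough sketch of that calling convention (local names and the fault
 * handling step are illustrative only); on a NULL return the lock has
 * already been dropped, so the caller must not unlock again:
 *
 *	mmap_read_lock(mm);
 *	vma = expand_stack(mm, addr);
 *	if (!vma)
 *		return VM_FAULT_SIGSEGV;
 *	fault = handle_mm_fault(vma, addr, flags, regs);
 *	mmap_read_unlock(mm);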
*/ struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; mmap_read_unlock(mm); if (mmap_write_lock_killable(mm)) return NULL; vma = find_vma_prev(mm, addr, &prev); if (vma && vma->vm_start <= addr) goto success; if (prev && !vma_expand_up(prev, addr)) { vma = prev; goto success; } if (vma && !vma_expand_down(vma, addr)) goto success; mmap_write_unlock(mm); return NULL; success: mmap_write_downgrade(mm); return vma; } /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. * @mm: The mm_struct * @start: The start address to munmap * @len: The length to be munmapped. * @uf: The userfaultfd list_head * * Return: 0 on success, error otherwise. */ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) { VMA_ITERATOR(vmi, mm, start); return do_vmi_munmap(&vmi, mm, start, len, uf, false); } int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); } EXPORT_SYMBOL(vm_munmap); SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { addr = untagged_addr(addr); return __vm_munmap(addr, len, true); } /* * Emulation of deprecated remap_file_pages() syscall. */ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long populate = 0; unsigned long ret = -EINVAL; struct file *file; vm_flags_t vm_flags; pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", current->comm, current->pid); if (prot) return ret; start = start & PAGE_MASK; size = size & PAGE_MASK; if (start + size <= start) return ret; /* Does pgoff wrap? */ if (pgoff + (size >> PAGE_SHIFT) < pgoff) return ret; if (mmap_read_lock_killable(mm)) return -EINTR; /* * Look up VMA under read lock first so we can perform the security * without holding locks (which can be problematic). We reacquire a * write lock later and check nothing changed underneath us. */ vma = vma_lookup(mm, start); if (!vma || !(vma->vm_flags & VM_SHARED)) { mmap_read_unlock(mm); return -EINVAL; } prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; flags &= MAP_NONBLOCK; flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; if (vma->vm_flags & VM_LOCKED) flags |= MAP_LOCKED; /* Save vm_flags used to calculate prot and flags, and recheck later. */ vm_flags = vma->vm_flags; file = get_file(vma->vm_file); mmap_read_unlock(mm); /* Call outside mmap_lock to be consistent with other callers. */ ret = security_mmap_file(file, prot, flags); if (ret) { fput(file); return ret; } ret = -EINVAL; /* OK security check passed, take write lock + let it rip. */ if (mmap_write_lock_killable(mm)) { fput(file); return -EINTR; } vma = vma_lookup(mm, start); if (!vma) goto out; /* Make sure things didn't change under us. */ if (vma->vm_flags != vm_flags) goto out; if (vma->vm_file != file) goto out; if (start + size > vma->vm_end) { VMA_ITERATOR(vmi, mm, vma->vm_end); struct vm_area_struct *next, *prev = vma; for_each_vma_range(vmi, next, start + size) { /* hole between vmas ? 
*/ if (next->vm_start != prev->vm_end) goto out; if (next->vm_file != vma->vm_file) goto out; if (next->vm_flags != vma->vm_flags) goto out; if (start + size <= next->vm_end) break; prev = next; } if (!next) goto out; } ret = do_mmap(vma->vm_file, start, size, prot, flags, 0, pgoff, &populate, NULL); out: mmap_write_unlock(mm); fput(file); if (populate) mm_populate(ret, populate); if (!IS_ERR_VALUE(ret)) ret = 0; return ret; } int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; unsigned long len; int ret; bool populate; LIST_HEAD(uf); VMA_ITERATOR(vmi, mm, addr); len = PAGE_ALIGN(request); if (len < request) return -ENOMEM; if (!len) return 0; /* Until we need other flags, refuse anything except VM_EXEC. */ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; if (mmap_write_lock_killable(mm)) return -EINTR; ret = check_brk_limits(addr, len); if (ret) goto limits_failed; ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); if (ret) goto munmap_failed; vma = vma_prev(&vmi); ret = do_brk_flags(&vmi, vma, addr, len, flags); populate = ((mm->def_flags & VM_LOCKED) != 0); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); if (populate && !ret) mm_populate(addr, len); return ret; munmap_failed: limits_failed: mmap_write_unlock(mm); return ret; } EXPORT_SYMBOL(vm_brk_flags); /* Release all mmaps. */ void exit_mmap(struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; unsigned long nr_accounted = 0; VMA_ITERATOR(vmi, mm, 0); int count = 0; /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); mmap_read_lock(mm); arch_exit_mmap(mm); vma = vma_next(&vmi); if (!vma || unlikely(xa_is_zero(vma))) { /* Can happen if dup_mmap() received an OOM */ mmap_read_unlock(mm); mmap_write_lock(mm); goto destroy; } flush_cache_mm(mm); tlb_gather_mmu_fullmm(&tlb, mm); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); mmap_read_unlock(mm); /* * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper * because the memory has been already freed. */ set_bit(MMF_OOM_SKIP, &mm->flags); mmap_write_lock(mm); mt_clear_in_rcu(&mm->mm_mt); vma_iter_set(&vmi, vma->vm_end); free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING, true); tlb_finish_mmu(&tlb); /* * Walk the list again, actually closing and freeing it, with preemption * enabled, without holding any MM locks besides the unreachable * mmap_write_lock. */ vma_iter_set(&vmi, vma->vm_end); do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma_mark_detached(vma); remove_vma(vma); count++; cond_resched(); vma = vma_next(&vmi); } while (vma && likely(!xa_is_zero(vma))); BUG_ON(count != mm->map_count); trace_exit_mmap(mm); destroy: __mt_destroy(&mm->mm_mt); mmap_write_unlock(mm); vm_unacct_memory(nr_accounted); } /* Insert vm structure into process list sorted by address * and into the inode's i_mmap tree. If vm_file is non-NULL * then i_mmap_rwsem is taken here. 
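 *
 * A condensed sketch of the usual allocate-then-insert pattern, modelled on
 * __install_special_mapping() further down in this file (error handling
 * abbreviated, names illustrative):
 *
 *	vma = vm_area_alloc(mm);
 *	if (!vma)
 *		return -ENOMEM;
 *	vma_set_range(vma, addr, addr + len, pgoff);
 *	vm_flags_init(vma, vm_flags);
 *	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 *	if (insert_vm_struct(mm, vma)) {
 *		vm_area_free(vma);
 *		return -ENOMEM;
 *	}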
*/ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { unsigned long charged = vma_pages(vma); if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index * are set. But now set the vm_pgoff it will almost certainly * end up with (unless mremap moves it elsewhere before that * first wfault), so /proc/pid/maps tells a consistent story. * * By setting it to reflect the virtual start address of the * vma, merges and splits can happen in a seamless way, just * using the existing file pgoff checks and manipulations. * Similarly in do_mmap and in do_brk_flags. */ if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } if (vma_link(mm, vma)) { if (vma->vm_flags & VM_ACCOUNT) vm_unacct_memory(charged); return -ENOMEM; } return 0; } /* * Return true if the calling process may expand its vm space by the passed * number of pages */ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) { if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) return false; if (is_data_mapping(flags) && mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { /* Workaround for Valgrind */ if (rlimit(RLIMIT_DATA) == 0 && mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) return true; pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", current->comm, current->pid, (mm->data_vm + npages) << PAGE_SHIFT, rlimit(RLIMIT_DATA), ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data"); if (!ignore_rlimit_data) return false; } return true; } void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) { WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); if (is_exec_mapping(flags)) mm->exec_vm += npages; else if (is_stack_mapping(flags)) mm->stack_vm += npages; else if (is_data_mapping(flags)) mm->data_vm += npages; } static vm_fault_t special_mapping_fault(struct vm_fault *vmf); /* * Close hook, called for unmap() and on the old vma for mremap(). * * Having a close hook prevents vma merging regardless of flags. */ static void special_mapping_close(struct vm_area_struct *vma) { const struct vm_special_mapping *sm = vma->vm_private_data; if (sm->close) sm->close(sm, vma); } static const char *special_mapping_name(struct vm_area_struct *vma) { return ((struct vm_special_mapping *)vma->vm_private_data)->name; } static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) return -EFAULT; if (sm->mremap) return sm->mremap(sm, new_vma); return 0; } static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) { /* * Forbid splitting special mappings - kernel has expectations over * the number of pages in mapping. Together with VM_DONTEXPAND * the size of vma should stay the same over the special mapping's * lifetime. 
*/ return -EINVAL; } static const struct vm_operations_struct special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, .mremap = special_mapping_mremap, .name = special_mapping_name, /* vDSO code relies that VVAR can't be accessed remotely */ .access = NULL, .may_split = special_mapping_split, }; static vm_fault_t special_mapping_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pgoff_t pgoff; struct page **pages; struct vm_special_mapping *sm = vma->vm_private_data; if (sm->fault) return sm->fault(sm, vmf->vma, vmf); pages = sm->pages; for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { struct page *page = *pages; get_page(page); vmf->page = page; return 0; } return VM_FAULT_SIGBUS; } static struct vm_area_struct *__install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, void *priv, const struct vm_operations_struct *ops) { int ret; struct vm_area_struct *vma; vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); vma_set_range(vma, addr, addr + len, 0); vm_flags_init(vma, (vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = ops; vma->vm_private_data = priv; ret = insert_vm_struct(mm, vma); if (ret) goto out; vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); return vma; out: vm_area_free(vma); return ERR_PTR(ret); } bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm) { return vma->vm_private_data == sm && vma->vm_ops == &special_mapping_vmops; } /* * Called with mm->mmap_lock held for writing. * Insert a new vma covering the given region, with the given flags. * Its pages are supplied by the given array of struct page *. * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. * The region past the last page supplied will always produce SIGBUS. * The array pointer and the pages it points to are assumed to stay alive * for as long as this mapping might exist. 
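 *
 * A hedged usage sketch, loosely modelled on how architectures install a
 * vDSO-like page; the "demo" names and the VM flags chosen here are purely
 * illustrative. demo_pages[0] would be assigned a real page before the call,
 * with the trailing NULL terminating the array as described above:
 *
 *	static struct page *demo_pages[2];
 *	static struct vm_special_mapping demo_mapping = {
 *		.name  = "[demo]",
 *		.pages = demo_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &demo_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);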
*/ struct vm_area_struct *_install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) { return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, &special_mapping_vmops); } #ifdef CONFIG_SYSCTL #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) int sysctl_legacy_va_layout; #endif static const struct ctl_table mmap_table[] = { { .procname = "max_map_count", .data = &sysctl_max_map_count, .maxlen = sizeof(sysctl_max_map_count), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) { .procname = "legacy_va_layout", .data = &sysctl_legacy_va_layout, .maxlen = sizeof(sysctl_legacy_va_layout), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS { .procname = "mmap_rnd_bits", .data = &mmap_rnd_bits, .maxlen = sizeof(mmap_rnd_bits), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&mmap_rnd_bits_min, .extra2 = (void *)&mmap_rnd_bits_max, }, #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS { .procname = "mmap_rnd_compat_bits", .data = &mmap_rnd_compat_bits, .maxlen = sizeof(mmap_rnd_compat_bits), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = (void *)&mmap_rnd_compat_bits_min, .extra2 = (void *)&mmap_rnd_compat_bits_max, }, #endif }; #endif /* CONFIG_SYSCTL */ /* * initialise the percpu counter for VM */ void __init mmap_init(void) { int ret; ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); VM_BUG_ON(ret); #ifdef CONFIG_SYSCTL register_sysctl_init("vm", mmap_table); #endif } /* * Initialise sysctl_user_reserve_kbytes. * * This is intended to prevent a user from starting a single memory hogging * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER * mode. * * The default value is min(3% of free memory, 128MB) * 128MB is enough to recover with sshd/login, bash, and top/kill. */ static int init_user_reserve(void) { unsigned long free_kbytes; free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K); return 0; } subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. * * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin * to log in and kill a memory hogging process. * * Systems with more than 256MB will reserve 8MB, enough to recover * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will * only reserve 3% of free pages by default. */ static int init_admin_reserve(void) { unsigned long free_kbytes; free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K); return 0; } subsys_initcall(init_admin_reserve); /* * Reinititalise user and admin reserves if memory is added or removed. * * The default user reserve max is 128MB, and the default max for the * admin reserve is 8MB. These are usually, but not always, enough to * enable recovery from a memory hogging process using login/sshd, a shell, * and tools like top. It may make sense to increase or even disable the * reserve depending on the existence of swap or variations in the recovery * tools. So, the admin may have changed them. * * If memory is added and the reserves have been eliminated or increased above * the default max, then we'll trust the admin. 
* * If memory is removed and there isn't enough free memory, then we * need to reset the reserves. * * Otherwise keep the reserve set by the admin. */ static int reserve_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { unsigned long tmp, free_kbytes; switch (action) { case MEM_ONLINE: /* Default max is 128MB. Leave alone if modified by operator. */ tmp = sysctl_user_reserve_kbytes; if (tmp > 0 && tmp < SZ_128K) init_user_reserve(); /* Default max is 8MB. Leave alone if modified by operator. */ tmp = sysctl_admin_reserve_kbytes; if (tmp > 0 && tmp < SZ_8K) init_admin_reserve(); break; case MEM_OFFLINE: free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); if (sysctl_user_reserve_kbytes > free_kbytes) { init_user_reserve(); pr_info("vm.user_reserve_kbytes reset to %lu\n", sysctl_user_reserve_kbytes); } if (sysctl_admin_reserve_kbytes > free_kbytes) { init_admin_reserve(); pr_info("vm.admin_reserve_kbytes reset to %lu\n", sysctl_admin_reserve_kbytes); } break; default: break; } return NOTIFY_OK; } static int __meminit init_reserve_notifier(void) { if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI)) pr_err("Failed registering memory add/remove notifier for admin reserve\n"); return 0; } subsys_initcall(init_reserve_notifier); /* * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between * this VMA and its relocated range, which will now reside at [vma->vm_start - * shift, vma->vm_end - shift). * * This function is almost certainly NOT what you want for anything other than * early executable temporary stack relocation. */ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) { /* * The process proceeds as follows: * * 1) Use shift to calculate the new vma endpoints. * 2) Extend vma to cover both the old and new ranges. This ensures the * arguments passed to subsequent functions are consistent. * 3) Move vma's page tables to the new range. * 4) Free up any cleared pgd range. * 5) Shrink the vma to cover only the new range. */ struct mm_struct *mm = vma->vm_mm; unsigned long old_start = vma->vm_start; unsigned long old_end = vma->vm_end; unsigned long length = old_end - old_start; unsigned long new_start = old_start - shift; unsigned long new_end = old_end - shift; VMA_ITERATOR(vmi, mm, new_start); VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff); struct vm_area_struct *next; struct mmu_gather tlb; PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length); BUG_ON(new_start > new_end); /* * ensure there are no vmas between where we want to go * and where we are */ if (vma != vma_next(&vmi)) return -EFAULT; vma_iter_prev_range(&vmi); /* * cover the whole range: [new_start, old_end) */ vmg.middle = vma; if (vma_expand(&vmg)) return -ENOMEM; /* * move the page tables downwards, on failure we rely on * process cleanup to remove whatever mess we made. */ pmc.for_stack = true; if (length != move_page_tables(&pmc)) return -ENOMEM; tlb_gather_mmu(&tlb, mm); next = vma_next(&vmi); if (new_end > old_start) { /* * when the old and new regions overlap clear from new_end. */ free_pgd_range(&tlb, new_end, old_end, new_end, next ? next->vm_start : USER_PGTABLES_CEILING); } else { /* * otherwise, clean from old_start; this is done to not touch * the address space in [new_end, old_start) some architectures * have constraints on va-space that make this illegal (IA64) - * for the others its just a little faster. */ free_pgd_range(&tlb, old_start, old_end, new_end, next ? 
next->vm_start : USER_PGTABLES_CEILING); } tlb_finish_mmu(&tlb); vma_prev(&vmi); /* Shrink the vma to just the new range */ return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff); } #ifdef CONFIG_MMU /* * Obtain a read lock on mm->mmap_lock, if the specified address is below the * start of the VMA, the intent is to perform a write, and it is a * downward-growing stack, then attempt to expand the stack to contain it. * * This function is intended only for obtaining an argument page from an ELF * image, and is almost certainly NOT what you want to use for any other * purpose. * * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the * VMA referenced must not be linked in any user-visible tree, i.e. it must be a * new VMA being mapped. * * The function assumes that addr is either contained within the VMA or below * it, and makes no attempt to validate this value beyond that. * * Returns true if the read lock was obtained and a stack was perhaps expanded, * false if the stack expansion failed. * * On stack expansion the function temporarily acquires an mmap write lock * before downgrading it. */ bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *new_vma, unsigned long addr, bool write) { if (!write || addr >= new_vma->vm_start) { mmap_read_lock(mm); return true; } if (!(new_vma->vm_flags & VM_GROWSDOWN)) return false; mmap_write_lock(mm); if (expand_downwards(new_vma, addr)) { mmap_write_unlock(mm); return false; } mmap_write_downgrade(mm); return true; } #else bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, bool write) { return false; } #endif
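/*
 * Illustrative userspace sketch (not part of mm/mmap.c): it exercises two of
 * the flag behaviours documented for do_mmap() above. MAP_FIXED_NOREPLACE is
 * expected to fail with EEXIST instead of clobbering an existing mapping, and
 * MAP_POPULATE asks for the range to be prefaulted. This assumes a kernel and
 * libc recent enough to expose MAP_FIXED_NOREPLACE; error handling is minimal.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;

	/* Anonymous private mapping, prefaulted because of MAP_POPULATE. */
	void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (a == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Ask for the same range again without permission to replace it. */
	void *b = mmap(a, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
	if (b == MAP_FAILED && errno == EEXIST)
		printf("MAP_FIXED_NOREPLACE refused to clobber %p\n", a);
	else if (b != MAP_FAILED)
		munmap(b, len);

	munmap(a, len);
	return 0;
}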
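/*
 * A condensed sketch of the vm_unmapped_area() calling pattern used by
 * generic_get_unmapped_area() above: the caller fills a
 * struct vm_unmapped_area_info and gets back either a page-aligned address or
 * an error value with the low bits set (hence the offset_in_page() test). A
 * top-down search would additionally set info.flags = VM_UNMAPPED_AREA_TOPDOWN
 * and use the limits shown in the topdown variant above:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = mmap_end;
 *	info.start_gap = stack_guard_placement(vm_flags);
 *	addr = vm_unmapped_area(&info);
 *	if (offset_in_page(addr))
 *		return addr;
 */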
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_LIST_H #define _LINUX_LIST_H #include <linux/container_of.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/poison.h> #include <linux/const.h> #include <asm/barrier.h> /* * Circular doubly linked list implementation. * * Some of the internal functions ("__xxx") are useful when * manipulating whole lists rather than single entries, as * sometimes we already know the next/prev entries and we can * generate better code by using them directly rather than * using the generic single-entry routines. */ #define LIST_HEAD_INIT(name) { &(name), &(name) } #define LIST_HEAD(name) \ struct list_head name = LIST_HEAD_INIT(name) /** * INIT_LIST_HEAD - Initialize a list_head structure * @list: list_head structure to be initialized. * * Initializes the list_head to point to itself. If it is a list header, * the result is an empty list. */ static inline void INIT_LIST_HEAD(struct list_head *list) { WRITE_ONCE(list->next, list); WRITE_ONCE(list->prev, list); } #ifdef CONFIG_LIST_HARDENED #ifdef CONFIG_DEBUG_LIST # define __list_valid_slowpath #else # define __list_valid_slowpath __cold __preserve_most #endif /* * Performs the full set of list corruption checks before __list_add(). * On list corruption reports a warning, and returns false. */ extern bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new, struct list_head *prev, struct list_head *next); /* * Performs list corruption checks before __list_add(). Returns false if a * corruption is detected, true otherwise. * * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking * inline to catch non-faulting corruptions, and only if a corruption is * detected calls the reporting function __list_add_valid_or_report().
*/ static __always_inline bool __list_add_valid(struct list_head *new, struct list_head *prev, struct list_head *next) { bool ret = true; if (!IS_ENABLED(CONFIG_DEBUG_LIST)) { /* * With the hardening version, elide checking if next and prev * are NULL, since the immediate dereference of them below would * result in a fault if NULL. * * With the reduced set of checks, we can afford to inline the * checks, which also gives the compiler a chance to elide some * of them completely if they can be proven at compile-time. If * one of the pre-conditions does not hold, the slow-path will * show a report which pre-condition failed. */ if (likely(next->prev == prev && prev->next == next && new != prev && new != next)) return true; ret = false; } ret &= __list_add_valid_or_report(new, prev, next); return ret; } /* * Performs the full set of list corruption checks before __list_del_entry(). * On list corruption reports a warning, and returns false. */ extern bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry); /* * Performs list corruption checks before __list_del_entry(). Returns false if a * corruption is detected, true otherwise. * * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking * inline to catch non-faulting corruptions, and only if a corruption is * detected calls the reporting function __list_del_entry_valid_or_report(). */ static __always_inline bool __list_del_entry_valid(struct list_head *entry) { bool ret = true; if (!IS_ENABLED(CONFIG_DEBUG_LIST)) { struct list_head *prev = entry->prev; struct list_head *next = entry->next; /* * With the hardening version, elide checking if next and prev * are NULL, LIST_POISON1 or LIST_POISON2, since the immediate * dereference of them below would result in a fault. */ if (likely(prev->next == entry && next->prev == entry)) return true; ret = false; } ret &= __list_del_entry_valid_or_report(entry); return ret; } #else static inline bool __list_add_valid(struct list_head *new, struct list_head *prev, struct list_head *next) { return true; } static inline bool __list_del_entry_valid(struct list_head *entry) { return true; } #endif /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { if (!__list_add_valid(new, prev, next)) return; next->prev = new; new->next = next; new->prev = prev; WRITE_ONCE(prev->next, new); } /** * list_add - add a new entry * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. */ static inline void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } /** * list_add_tail - add a new entry * @new: new entry to be added * @head: list head to add it before * * Insert a new entry before the specified head. * This is useful for implementing queues. */ static inline void list_add_tail(struct list_head *new, struct list_head *head) { __list_add(new, head->prev, head); } /* * Delete a list entry by making the prev/next entries * point to each other. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; WRITE_ONCE(prev->next, next); } /* * Delete a list entry and clear the 'prev' pointer. 
* * This is a special-purpose list clearing method used in the networking code * for lists allocated as per-cpu, where we don't want to incur the extra * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this * needs to check the node 'prev' pointer instead of calling list_empty(). */ static inline void __list_del_clearprev(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->prev = NULL; } static inline void __list_del_entry(struct list_head *entry) { if (!__list_del_entry_valid(entry)) return; __list_del(entry->prev, entry->next); } /** * list_del - deletes entry from list. * @entry: the element to delete from the list. * Note: list_empty() on entry does not return true after this, the entry is * in an undefined state. */ static inline void list_del(struct list_head *entry) { __list_del_entry(entry); entry->next = LIST_POISON1; entry->prev = LIST_POISON2; } /** * list_replace - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert * * If @old was empty, it will be overwritten. */ static inline void list_replace(struct list_head *old, struct list_head *new) { new->next = old->next; new->next->prev = new; new->prev = old->prev; new->prev->next = new; } /** * list_replace_init - replace old entry by new one and initialize the old one * @old : the element to be replaced * @new : the new element to insert * * If @old was empty, it will be overwritten. */ static inline void list_replace_init(struct list_head *old, struct list_head *new) { list_replace(old, new); INIT_LIST_HEAD(old); } /** * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position * @entry1: the location to place entry2 * @entry2: the location to place entry1 */ static inline void list_swap(struct list_head *entry1, struct list_head *entry2) { struct list_head *pos = entry2->prev; list_del(entry2); list_replace(entry1, entry2); if (pos == entry1) pos = entry2; list_add(entry1, pos); } /** * list_del_init - deletes entry from list and reinitialize it. * @entry: the element to delete from the list. */ static inline void list_del_init(struct list_head *entry) { __list_del_entry(entry); INIT_LIST_HEAD(entry); } /** * list_move - delete from one list and add as another's head * @list: the entry to move * @head: the head that will precede our entry */ static inline void list_move(struct list_head *list, struct list_head *head) { __list_del_entry(list); list_add(list, head); } /** * list_move_tail - delete from one list and add as another's tail * @list: the entry to move * @head: the head that will follow our entry */ static inline void list_move_tail(struct list_head *list, struct list_head *head) { __list_del_entry(list); list_add_tail(list, head); } /** * list_bulk_move_tail - move a subsection of a list to its tail * @head: the head that will follow our entry * @first: first entry to move * @last: last entry to move, can be the same as first * * Move all entries between @first and including @last before @head. * All three entries must belong to the same linked list. 
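 *
 * A small usage sketch (names illustrative): batch-demote a contiguous run of
 * nodes, first..last, to the tail of the same LRU-style list:
 *
 *	list_bulk_move_tail(&lru_list, first, last);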
*/ static inline void list_bulk_move_tail(struct list_head *head, struct list_head *first, struct list_head *last) { first->prev->next = last->next; last->next->prev = first->prev; head->prev->next = first; first->prev = head->prev; last->next = head; head->prev = last; } /** * list_is_first -- tests whether @list is the first entry in list @head * @list: the entry to test * @head: the head of the list */ static inline int list_is_first(const struct list_head *list, const struct list_head *head) { return list->prev == head; } /** * list_is_last - tests whether @list is the last entry in list @head * @list: the entry to test * @head: the head of the list */ static inline int list_is_last(const struct list_head *list, const struct list_head *head) { return list->next == head; } /** * list_is_head - tests whether @list is the list @head * @list: the entry to test * @head: the head of the list */ static inline int list_is_head(const struct list_head *list, const struct list_head *head) { return list == head; } /** * list_empty - tests whether a list is empty * @head: the list to test. */ static inline int list_empty(const struct list_head *head) { return READ_ONCE(head->next) == head; } /** * list_del_init_careful - deletes entry from list and reinitialize it. * @entry: the element to delete from the list. * * This is the same as list_del_init(), except designed to be used * together with list_empty_careful() in a way to guarantee ordering * of other memory operations. * * Any memory operations done before a list_del_init_careful() are * guaranteed to be visible after a list_empty_careful() test. */ static inline void list_del_init_careful(struct list_head *entry) { __list_del_entry(entry); WRITE_ONCE(entry->prev, entry); smp_store_release(&entry->next, entry); } /** * list_empty_careful - tests whether a list is empty and not being modified * @head: the list to test * * Description: * tests whether a list is empty _and_ checks that no other CPU might be * in the process of modifying either member (next or prev) * * NOTE: using list_empty_careful() without synchronization * can only be safe if the only activity that can happen * to the list entry is list_del_init(). Eg. it cannot be used * if another CPU could re-list_add() it. */ static inline int list_empty_careful(const struct list_head *head) { struct list_head *next = smp_load_acquire(&head->next); return list_is_head(next, head) && (next == READ_ONCE(head->prev)); } /** * list_rotate_left - rotate the list to the left * @head: the head of the list */ static inline void list_rotate_left(struct list_head *head) { struct list_head *first; if (!list_empty(head)) { first = head->next; list_move_tail(first, head); } } /** * list_rotate_to_front() - Rotate list to specific item. * @list: The desired new front of the list. * @head: The head of the list. * * Rotates list so that @list becomes the new front of the list. */ static inline void list_rotate_to_front(struct list_head *list, struct list_head *head) { /* * Deletes the list head from the list denoted by @head and * places it as the tail of @list, this effectively rotates the * list so that @list is at the front. */ list_move_tail(head, list); } /** * list_is_singular - tests whether a list has just one entry. * @head: the list to test. 
*/ static inline int list_is_singular(const struct list_head *head) { return !list_empty(head) && (head->next == head->prev); } static inline void __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { struct list_head *new_first = entry->next; list->next = head->next; list->next->prev = list; list->prev = entry; entry->next = list; head->next = new_first; new_first->prev = head; } /** * list_cut_position - cut a list into two * @list: a new list to add all removed entries * @head: a list with entries * @entry: an entry within head, could be the head itself * and if so we won't cut the list * * This helper moves the initial part of @head, up to and * including @entry, from @head to @list. You should * pass on @entry an element you know is on @head. @list * should be an empty list or a list you do not care about * losing its data. * */ static inline void list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { if (list_empty(head)) return; if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next)) return; if (list_is_head(entry, head)) INIT_LIST_HEAD(list); else __list_cut_position(list, head, entry); } /** * list_cut_before - cut a list into two, before given entry * @list: a new list to add all removed entries * @head: a list with entries * @entry: an entry within head, could be the head itself * * This helper moves the initial part of @head, up to but * excluding @entry, from @head to @list. You should pass * in @entry an element you know is on @head. @list should * be an empty list or a list you do not care about losing * its data. * If @entry == @head, all entries on @head are moved to * @list. */ static inline void list_cut_before(struct list_head *list, struct list_head *head, struct list_head *entry) { if (head->next == entry) { INIT_LIST_HEAD(list); return; } list->next = head->next; list->next->prev = list; list->prev = entry->prev; list->prev->next = list; head->next = entry; entry->prev = head; } static inline void __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next) { struct list_head *first = list->next; struct list_head *last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; } /** * list_splice - join two lists, this is designed for stacks * @list: the new list to add. * @head: the place to add it in the first list. */ static inline void list_splice(const struct list_head *list, struct list_head *head) { if (!list_empty(list)) __list_splice(list, head, head->next); } /** * list_splice_tail - join two lists, each list being a queue * @list: the new list to add. * @head: the place to add it in the first list. */ static inline void list_splice_tail(struct list_head *list, struct list_head *head) { if (!list_empty(list)) __list_splice(list, head->prev, head); } /** * list_splice_init - join two lists and reinitialise the emptied list. * @list: the new list to add. * @head: the place to add it in the first list. * * The list at @list is reinitialised */ static inline void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { __list_splice(list, head, head->next); INIT_LIST_HEAD(list); } } /** * list_splice_tail_init - join two lists and reinitialise the emptied list * @list: the new list to add. * @head: the place to add it in the first list. * * Each of the lists is a queue. 
* The list at @list is reinitialised */ static inline void list_splice_tail_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { __list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } } /** * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) /** * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) /** * list_last_entry - get the last element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ #define list_last_entry(ptr, type, member) \ list_entry((ptr)->prev, type, member) /** * list_first_entry_or_null - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. */ #define list_first_entry_or_null(ptr, type, member) ({ \ struct list_head *head__ = (ptr); \ struct list_head *pos__ = READ_ONCE(head__->next); \ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \ }) /** * list_next_entry - get the next element in list * @pos: the type * to cursor * @member: the name of the list_head within the struct. */ #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) /** * list_next_entry_circular - get the next element in list * @pos: the type * to cursor. * @head: the list head to take the element from. * @member: the name of the list_head within the struct. * * Wraparound if pos is the last element (return the first element). * Note, that list is expected to be not empty. */ #define list_next_entry_circular(pos, head, member) \ (list_is_last(&(pos)->member, head) ? \ list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member)) /** * list_prev_entry - get the prev element in list * @pos: the type * to cursor * @member: the name of the list_head within the struct. */ #define list_prev_entry(pos, member) \ list_entry((pos)->member.prev, typeof(*(pos)), member) /** * list_prev_entry_circular - get the prev element in list * @pos: the type * to cursor. * @head: the list head to take the element from. * @member: the name of the list_head within the struct. * * Wraparound if pos is the first element (return the last element). * Note, that list is expected to be not empty. */ #define list_prev_entry_circular(pos, head, member) \ (list_is_first(&(pos)->member, head) ? \ list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member)) /** * list_for_each - iterate over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. */ #define list_for_each(pos, head) \ for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next) /** * list_for_each_rcu - Iterate over a list in an RCU-safe fashion * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. 
*/ #define list_for_each_rcu(pos, head) \ for (pos = rcu_dereference((head)->next); \ !list_is_head(pos, (head)); \ pos = rcu_dereference(pos->next)) /** * list_for_each_continue - continue iteration over a list * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. * * Continue to iterate over a list, continuing after the current position. */ #define list_for_each_continue(pos, head) \ for (pos = pos->next; !list_is_head(pos, (head)); pos = pos->next) /** * list_for_each_prev - iterate over a list backwards * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. */ #define list_for_each_prev(pos, head) \ for (pos = (head)->prev; !list_is_head(pos, (head)); pos = pos->prev) /** * list_for_each_safe - iterate over a list safe against removal of list entry * @pos: the &struct list_head to use as a loop cursor. * @n: another &struct list_head to use as temporary storage * @head: the head for your list. */ #define list_for_each_safe(pos, n, head) \ for (pos = (head)->next, n = pos->next; \ !list_is_head(pos, (head)); \ pos = n, n = pos->next) /** * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry * @pos: the &struct list_head to use as a loop cursor. * @n: another &struct list_head to use as temporary storage * @head: the head for your list. */ #define list_for_each_prev_safe(pos, n, head) \ for (pos = (head)->prev, n = pos->prev; \ !list_is_head(pos, (head)); \ pos = n, n = pos->prev) /** * list_count_nodes - count nodes in the list * @head: the head for your list. */ static inline size_t list_count_nodes(struct list_head *head) { struct list_head *pos; size_t count = 0; list_for_each(pos, head) count++; return count; } /** * list_entry_is_head - test if the entry points to the head of the list * @pos: the type * to cursor * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_entry_is_head(pos, head, member) \ list_is_head(&pos->member, (head)) /** * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** * list_for_each_entry_reverse - iterate backwards over list of given type. * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() * @pos: the type * to use as a start point * @head: the head of the list * @member: the name of the list_head within the struct. * * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). */ #define list_prepare_entry(pos, head, member) \ ((pos) ? : list_entry(head, typeof(*pos), member)) /** * list_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. 
*/ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_next_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** * list_for_each_entry_continue_reverse - iterate backwards from the given point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Start to iterate over list of given type backwards, continuing after * the current position. */ #define list_for_each_entry_continue_reverse(pos, head, member) \ for (pos = list_prev_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing from current position. */ #define list_for_each_entry_from(pos, head, member) \ for (; !list_entry_is_head(pos, head, member); \ pos = list_next_entry(pos, member)) /** * list_for_each_entry_from_reverse - iterate backwards over list of given type * from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate backwards over list of given type, continuing from current position. */ #define list_for_each_entry_from_reverse(pos, head, member) \ for (; !list_entry_is_head(pos, head, member); \ pos = list_prev_entry(pos, member)) /** * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ n = list_next_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** * list_for_each_entry_safe_continue - continue list iteration safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing after current point, * safe against removal of list entry. */ #define list_for_each_entry_safe_continue(pos, n, head, member) \ for (pos = list_next_entry(pos, member), \ n = list_next_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** * list_for_each_entry_safe_from - iterate over list from current point safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. * * Iterate over list of given type from current point, safe against * removal of list entry. */ #define list_for_each_entry_safe_from(pos, n, head, member) \ for (n = list_next_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = n, n = list_next_entry(n, member)) /** * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. * @member: the name of the list_head within the struct. 
* * Iterate backwards over list of given type, safe against removal * of list entry. */ #define list_for_each_entry_safe_reverse(pos, n, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member), \ n = list_prev_entry(pos, member); \ !list_entry_is_head(pos, head, member); \ pos = n, n = list_prev_entry(n, member)) /** * list_safe_reset_next - reset a stale list_for_each_entry_safe loop * @pos: the loop cursor used in the list_for_each_entry_safe loop * @n: temporary storage used in list_for_each_entry_safe * @member: the name of the list_head within the struct. * * list_safe_reset_next is not safe to use in general if the list may be * modified concurrently (eg. the lock is dropped in the loop body). An * exception to this is if the cursor element (pos) is pinned in the list, * and list_safe_reset_next is called after re-taking the lock and before * completing the current iteration of the loop body. */ #define list_safe_reset_next(pos, n, member) \ n = list_next_entry(pos, member) /* * Double linked lists with a single pointer list head. * Mostly useful for hash tables where the two pointer list head is * too wasteful. * You lose the ability to access the tail in O(1). */ #define HLIST_HEAD_INIT { .first = NULL } #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) static inline void INIT_HLIST_NODE(struct hlist_node *h) { h->next = NULL; h->pprev = NULL; } /** * hlist_unhashed - Has node been removed from list and reinitialized? * @h: Node to be checked * * Not that not all removal functions will leave a node in unhashed * state. For example, hlist_nulls_del_init_rcu() does leave the * node in unhashed state, but hlist_nulls_del() does not. */ static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } /** * hlist_unhashed_lockless - Version of hlist_unhashed for lockless use * @h: Node to be checked * * This variant of hlist_unhashed() must be used in lockless contexts * to avoid potential load-tearing. The READ_ONCE() is paired with the * various WRITE_ONCE() in hlist helpers that are defined below. */ static inline int hlist_unhashed_lockless(const struct hlist_node *h) { return !READ_ONCE(h->pprev); } /** * hlist_empty - Is the specified hlist_head structure an empty hlist? * @h: Structure to check. */ static inline int hlist_empty(const struct hlist_head *h) { return !READ_ONCE(h->first); } static inline void __hlist_del(struct hlist_node *n) { struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; WRITE_ONCE(*pprev, next); if (next) WRITE_ONCE(next->pprev, pprev); } /** * hlist_del - Delete the specified hlist_node from its list * @n: Node to delete. * * Note that this function leaves the node in hashed state. Use * hlist_del_init() or similar instead to unhash @n. */ static inline void hlist_del(struct hlist_node *n) { __hlist_del(n); n->next = LIST_POISON1; n->pprev = LIST_POISON2; } /** * hlist_del_init - Delete the specified hlist_node from its list and initialize * @n: Node to delete. * * Note that this function leaves the node in unhashed state. */ static inline void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); INIT_HLIST_NODE(n); } } /** * hlist_add_head - add a new entry at the beginning of the hlist * @n: new entry to be added * @h: hlist head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. 
*/ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; WRITE_ONCE(n->next, first); if (first) WRITE_ONCE(first->pprev, &n->next); WRITE_ONCE(h->first, n); WRITE_ONCE(n->pprev, &h->first); } /** * hlist_add_before - add a new entry before the one specified * @n: new entry to be added * @next: hlist node to add it before, which must be non-NULL */ static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { WRITE_ONCE(n->pprev, next->pprev); WRITE_ONCE(n->next, next); WRITE_ONCE(next->pprev, &n->next); WRITE_ONCE(*(n->pprev), n); } /** * hlist_add_behind - add a new entry after the one specified * @n: new entry to be added * @prev: hlist node to add it after, which must be non-NULL */ static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { WRITE_ONCE(n->next, prev->next); WRITE_ONCE(prev->next, n); WRITE_ONCE(n->pprev, &prev->next); if (n->next) WRITE_ONCE(n->next->pprev, &n->next); } /** * hlist_add_fake - create a fake hlist consisting of a single headless node * @n: Node to make a fake list out of * * This makes @n appear to be its own predecessor on a headless hlist. * The point of this is to allow things like hlist_del() to work correctly * in cases where there is no list. */ static inline void hlist_add_fake(struct hlist_node *n) { n->pprev = &n->next; } /** * hlist_fake: Is this node a fake hlist? * @h: Node to check for being a self-referential fake hlist. */ static inline bool hlist_fake(struct hlist_node *h) { return h->pprev == &h->next; } /** * hlist_is_singular_node - is node the only element of the specified hlist? * @n: Node to check for singularity. * @h: Header for potentially singular list. * * Check whether the node is the only node of the head without * accessing head, thus avoiding unnecessary cache misses. */ static inline bool hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) { return !n->next && n->pprev == &h->first; } /** * hlist_move_list - Move an hlist * @old: hlist_head for old list. * @new: hlist_head for new list. * * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. */ static inline void hlist_move_list(struct hlist_head *old, struct hlist_head *new) { new->first = old->first; if (new->first) new->first->pprev = &new->first; old->first = NULL; } /** * hlist_splice_init() - move all entries from one list to another * @from: hlist_head from which entries will be moved * @last: last entry on the @from list * @to: hlist_head to which entries will be moved * * @to can be empty, @from must contain at least @last. */ static inline void hlist_splice_init(struct hlist_head *from, struct hlist_node *last, struct hlist_head *to) { if (to->first) to->first->pprev = &last->next; last->next = to->first; to->first = from->first; from->first->pprev = &to->first; from->first = NULL; } #define hlist_entry(ptr, type, member) container_of(ptr,type,member) #define hlist_for_each(pos, head) \ for (pos = (head)->first; pos ; pos = pos->next) #define hlist_for_each_safe(pos, n, head) \ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ pos = n) #define hlist_entry_safe(ptr, type, member) \ ({ typeof(ptr) ____ptr = (ptr); \ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ }) /** * hlist_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. 
*/ #define hlist_for_each_entry(pos, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_continue - iterate over a hlist continuing after current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue(pos, member) \ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_from - iterate over a hlist continuing from current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from(pos, member) \ for (; pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) /** * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry * @pos: the type * to use as a loop cursor. * @n: a &struct hlist_node to use as temporary storage * @head: the head for your list. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_safe(pos, n, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ pos && ({ n = pos->member.next; 1; }); \ pos = hlist_entry_safe(n, typeof(*pos), member)) /** * hlist_count_nodes - count nodes in the hlist * @head: the head for your hlist. */ static inline size_t hlist_count_nodes(struct hlist_head *head) { struct hlist_node *pos; size_t count = 0; hlist_for_each(pos, head) count++; return count; } #endif
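/*
 * Editorial usage sketch (not part of the header above): the list API is
 * documented only through kernel-doc comments, so here is a minimal,
 * hypothetical example of the common pattern -- embed a struct list_head,
 * append with list_add_tail(), and tear down with the _safe iterator so
 * list_del() inside the loop is allowed. All "demo_" names are invented
 * for illustration only.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_item {
	int value;
	struct list_head node;	/* embedded list linkage */
};

static LIST_HEAD(demo_list);	/* statically initialized empty list head */

/* Append a new item at the tail (queue order). */
static int demo_add(int value)
{
	struct demo_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

	if (!item)
		return -ENOMEM;
	item->value = value;
	list_add_tail(&item->node, &demo_list);
	return 0;
}

/* Walk and empty the list; the _safe variant tolerates removal of the cursor. */
static void demo_drain(void)
{
	struct demo_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, &demo_list, node) {
		list_del(&item->node);
		kfree(item);
	}
}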
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_VIRTIO_VSOCK_H #define _LINUX_VIRTIO_VSOCK_H #include <uapi/linux/virtio_vsock.h> #include <linux/socket.h> #include <net/sock.h> #include <net/af_vsock.h> #define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr)) struct virtio_vsock_skb_cb { bool reply; bool tap_delivered; u32 offset; }; #define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb)) static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb) { return (struct virtio_vsock_hdr *)skb->head; } static inline bool virtio_vsock_skb_reply(struct sk_buff *skb) { return VIRTIO_VSOCK_SKB_CB(skb)->reply; } static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb) { VIRTIO_VSOCK_SKB_CB(skb)->reply = true; } static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb) { return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered; } static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb) { VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true; } static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb) { VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false; } static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb) { u32 len; len = le32_to_cpu(virtio_vsock_hdr(skb)->len); if (len > 0) skb_put(skb, len); } static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask) { struct sk_buff *skb; if (size < VIRTIO_VSOCK_SKB_HEADROOM) return NULL; skb = alloc_skb(size, mask); if (!skb) return NULL; skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM); return skb; } static inline void virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb) { spin_lock_bh(&list->lock); __skb_queue_head(list, skb); spin_unlock_bh(&list->lock); } static inline void virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb) { spin_lock_bh(&list->lock); __skb_queue_tail(list, skb); spin_unlock_bh(&list->lock); } static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list) { struct sk_buff *skb; spin_lock_bh(&list->lock); skb = __skb_dequeue(list); spin_unlock_bh(&list->lock); return skb; } static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list) { spin_lock_bh(&list->lock); __skb_queue_purge(list); spin_unlock_bh(&list->lock); } static inline size_t virtio_vsock_skb_len(struct sk_buff *skb) { return (size_t)(skb_end_pointer(skb) - skb->head); } #define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4) #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE
(1024 * 64) enum { VSOCK_VQ_RX = 0, /* for host to guest data */ VSOCK_VQ_TX = 1, /* for guest to host data */ VSOCK_VQ_EVENT = 2, VSOCK_VQ_MAX = 3, }; /* Per-socket state (accessed via vsk->trans) */ struct virtio_vsock_sock { struct vsock_sock *vsk; spinlock_t tx_lock; spinlock_t rx_lock; /* Protected by tx_lock */ u32 tx_cnt; u32 peer_fwd_cnt; u32 peer_buf_alloc; size_t bytes_unsent; /* Protected by rx_lock */ u32 fwd_cnt; u32 last_fwd_cnt; u32 rx_bytes; u32 buf_alloc; struct sk_buff_head rx_queue; u32 msg_count; }; struct virtio_vsock_pkt_info { u32 remote_cid, remote_port; struct vsock_sock *vsk; struct msghdr *msg; u32 pkt_len; u16 type; u16 op; u32 flags; bool reply; }; struct virtio_transport { /* This must be the first field */ struct vsock_transport transport; /* Takes ownership of the packet */ int (*send_pkt)(struct sk_buff *skb); /* Used in MSG_ZEROCOPY mode. Checks, that provided data * (number of buffers) could be transmitted with zerocopy * mode. If this callback is not implemented for the current * transport - this means that this transport doesn't need * extra checks and can perform zerocopy transmission by * default. */ bool (*can_msgzerocopy)(int bufs_num); }; ssize_t virtio_transport_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg, size_t len, int type); int virtio_transport_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags); int virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk, struct msghdr *msg, size_t len); ssize_t virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk, struct msghdr *msg, int flags); s64 virtio_transport_stream_has_data(struct vsock_sock *vsk); s64 virtio_transport_stream_has_space(struct vsock_sock *vsk); u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk); ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk); void virtio_transport_consume_skb_sent(struct sk_buff *skb, bool consume); int virtio_transport_do_socket_init(struct vsock_sock *vsk, struct vsock_sock *psk); int virtio_transport_notify_poll_in(struct vsock_sock *vsk, size_t target, bool *data_ready_now); int virtio_transport_notify_poll_out(struct vsock_sock *vsk, size_t target, bool *space_available_now); int virtio_transport_notify_recv_init(struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data); int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data); int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk, size_t target, struct vsock_transport_recv_notify_data *data); int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk, size_t target, ssize_t copied, bool data_read, struct vsock_transport_recv_notify_data *data); int virtio_transport_notify_send_init(struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data); int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data); int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk, struct vsock_transport_send_notify_data *data); int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written, struct vsock_transport_send_notify_data *data); void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val); u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk); bool virtio_transport_stream_is_active(struct vsock_sock *vsk); bool virtio_transport_stream_allow(u32 cid, u32 port); int virtio_transport_dgram_bind(struct vsock_sock 
*vsk, struct sockaddr_vm *addr); bool virtio_transport_dgram_allow(u32 cid, u32 port); int virtio_transport_connect(struct vsock_sock *vsk); int virtio_transport_shutdown(struct vsock_sock *vsk, int mode); void virtio_transport_release(struct vsock_sock *vsk); ssize_t virtio_transport_stream_enqueue(struct vsock_sock *vsk, struct msghdr *msg, size_t len); int virtio_transport_dgram_enqueue(struct vsock_sock *vsk, struct sockaddr_vm *remote_addr, struct msghdr *msg, size_t len); void virtio_transport_destruct(struct vsock_sock *vsk); void virtio_transport_recv_pkt(struct virtio_transport *t, struct sk_buff *skb); void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb); u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted); void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit); void virtio_transport_deliver_tap_pkt(struct sk_buff *skb); int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list); int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor); int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val); #endif /* _LINUX_VIRTIO_VSOCK_H */
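/*
 * Editorial usage sketch (not part of the header above): a minimal,
 * hypothetical example of the virtio_vsock skb helpers. It allocates a
 * buffer sized for the default RX buffer (the helper reserves
 * VIRTIO_VSOCK_SKB_HEADROOM itself) and moves it through the locked
 * queue helpers. The "demo_vsock_" names are invented for illustration.
 */
#include <linux/virtio_vsock.h>

static int demo_vsock_produce(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	/* Allocation fails with NULL if size < VIRTIO_VSOCK_SKB_HEADROOM or no memory. */
	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Enqueue at the tail; the helper takes list->lock with BHs disabled. */
	virtio_vsock_skb_queue_tail(queue, skb);
	return 0;
}

static void demo_vsock_consume(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	/* Drain the queue; virtio_vsock_skb_dequeue() returns NULL when empty. */
	while ((skb = virtio_vsock_skb_dequeue(queue)) != NULL)
		kfree_skb(skb);
}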
/* SPDX-License-Identifier: GPL-2.0 */ /* * Thunderbolt driver - bus logic (NHI independent) * * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> * Copyright (C) 2018, Intel Corporation */ #ifndef TB_H_ #define TB_H_ #include <linux/debugfs.h> #include <linux/nvmem-provider.h> #include <linux/pci.h> #include <linux/thunderbolt.h> #include <linux/uuid.h> #include <linux/bitfield.h> #include "tb_regs.h" #include "ctl.h" #include "dma_port.h" /* Keep link controller awake during update */ #define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) /*
Disable CLx if not supported */ #define QUIRK_NO_CLX BIT(1) /* Need to keep power on while USB4 port is in redrive mode */ #define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2) /** * struct tb_nvm - Structure holding NVM information * @dev: Owner of the NVM * @major: Major version number of the active NVM portion * @minor: Minor version number of the active NVM portion * @id: Identifier used with both NVM portions * @active: Active portion NVMem device * @active_size: Size in bytes of the active NVM * @non_active: Non-active portion NVMem device * @buf: Buffer where the NVM image is stored before it is written to * the actual NVM flash device * @buf_data_start: Where the actual image starts after skipping * possible headers * @buf_data_size: Number of bytes actually consumed by the new NVM * image * @authenticating: The device is authenticating the new NVM * @flushed: The image has been flushed to the storage area * @vops: Router vendor specific NVM operations (optional) * * The user of this structure needs to handle serialization of possible * concurrent access. */ struct tb_nvm { struct device *dev; u32 major; u32 minor; int id; struct nvmem_device *active; size_t active_size; struct nvmem_device *non_active; void *buf; void *buf_data_start; size_t buf_data_size; bool authenticating; bool flushed; const struct tb_nvm_vendor_ops *vops; }; enum tb_nvm_write_ops { WRITE_AND_AUTHENTICATE = 1, WRITE_ONLY = 2, AUTHENTICATE_ONLY = 3, }; #define TB_SWITCH_KEY_SIZE 32 #define TB_SWITCH_MAX_DEPTH 6 #define USB4_SWITCH_MAX_DEPTH 5 /** * enum tb_switch_tmu_mode - TMU mode * @TB_SWITCH_TMU_MODE_OFF: TMU is off * @TB_SWITCH_TMU_MODE_LOWRES: Uni-directional, normal mode * @TB_SWITCH_TMU_MODE_HIFI_UNI: Uni-directional, HiFi mode * @TB_SWITCH_TMU_MODE_HIFI_BI: Bi-directional, HiFi mode * @TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: Enhanced Uni-directional, MedRes mode * * Ordering is based on TMU accuracy level (highest last). */ enum tb_switch_tmu_mode { TB_SWITCH_TMU_MODE_OFF, TB_SWITCH_TMU_MODE_LOWRES, TB_SWITCH_TMU_MODE_HIFI_UNI, TB_SWITCH_TMU_MODE_HIFI_BI, TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI, }; /** * struct tb_switch_tmu - Structure holding router TMU configuration * @cap: Offset to the TMU capability (%0 if not found) * @has_ucap: Does the switch support uni-directional mode * @mode: TMU mode related to the upstream router. Reflects the HW * setting. Don't care for host router. * @mode_request: TMU mode requested to set. Related to upstream router. * Don't care for host router. */ struct tb_switch_tmu { int cap; bool has_ucap; enum tb_switch_tmu_mode mode; enum tb_switch_tmu_mode mode_request; }; /** * struct tb_switch - a thunderbolt switch * @dev: Device for the switch * @config: Switch configuration * @ports: Ports in this switch * @dma_port: If the switch has port supporting DMA configuration based * mailbox this will hold the pointer to that (%NULL * otherwise). If set it also means the switch has * upgradeable NVM. 
* @tmu: The switch TMU configuration * @tb: Pointer to the domain the switch belongs to * @uid: Unique ID of the switch * @uuid: UUID of the switch (or %NULL if not supported) * @vendor: Vendor ID of the switch * @device: Device ID of the switch * @vendor_name: Name of the vendor (or %NULL if not known) * @device_name: Name of the device (or %NULL if not known) * @link_speed: Speed of the link in Gb/s * @link_width: Width of the upstream facing link * @preferred_link_width: Router preferred link width (only set for Gen 4 links) * @link_usb4: Upstream link is USB4 * @generation: Switch Thunderbolt generation * @cap_plug_events: Offset to the plug events capability (%0 if not found) * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found) * @cap_lc: Offset to the link controller capability (%0 if not found) * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found) * @is_unplugged: The switch is going away * @drom: DROM of the switch (%NULL if not found) * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise) * @no_nvm_upgrade: Prevent NVM upgrade of this switch * @safe_mode: The switch is in safe-mode * @boot: Whether the switch was already authorized on boot or not * @rpm: The switch supports runtime PM * @authorized: Whether the switch is authorized by user or policy * @security_level: Switch supported security level * @debugfs_dir: Pointer to the debugfs structure * @key: Contains the key used to challenge the device or %NULL if not * supported. Size of the key is %TB_SWITCH_KEY_SIZE. * @connection_id: Connection ID used with ICM messaging * @connection_key: Connection key used with ICM messaging * @link: Root switch link this switch is connected (ICM only) * @depth: Depth in the chain this switch is connected (ICM only) * @rpm_complete: Completion used to wait for runtime resume to * complete (ICM only) * @quirks: Quirks used for this Thunderbolt switch * @credit_allocation: Are the below buffer allocation parameters valid * @max_usb3_credits: Router preferred number of buffers for USB 3.x * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN * @max_pcie_credits: Router preferred number of buffers for PCIe * @max_dma_credits: Router preferred number of buffers for DMA/P2P * @clx: CLx states on the upstream link of the router * @drom_blob: DROM debugfs blob wrapper * * When the switch is being added or removed to the domain (other * switches) you need to have domain lock held. * * In USB4 terminology this structure represents a router. 
*/ struct tb_switch { struct device dev; struct tb_regs_switch_header config; struct tb_port *ports; struct tb_dma_port *dma_port; struct tb_switch_tmu tmu; struct tb *tb; u64 uid; uuid_t *uuid; u16 vendor; u16 device; const char *vendor_name; const char *device_name; unsigned int link_speed; enum tb_link_width link_width; enum tb_link_width preferred_link_width; bool link_usb4; unsigned int generation; int cap_plug_events; int cap_vsec_tmu; int cap_lc; int cap_lp; bool is_unplugged; u8 *drom; struct tb_nvm *nvm; bool no_nvm_upgrade; bool safe_mode; bool boot; bool rpm; unsigned int authorized; enum tb_security_level security_level; struct dentry *debugfs_dir; u8 *key; u8 connection_id; u8 connection_key; u8 link; u8 depth; struct completion rpm_complete; unsigned long quirks; bool credit_allocation; unsigned int max_usb3_credits; unsigned int min_dp_aux_credits; unsigned int min_dp_main_credits; unsigned int max_pcie_credits; unsigned int max_dma_credits; unsigned int clx; #ifdef CONFIG_DEBUG_FS struct debugfs_blob_wrapper drom_blob; #endif }; /** * struct tb_bandwidth_group - Bandwidth management group * @tb: Pointer to the domain the group belongs to * @index: Index of the group (aka Group_ID). Valid values %1-%7 * @ports: DP IN adapters belonging to this group are linked here * @reserved: Bandwidth released by one tunnel in the group, available * to others. This is reported as part of estimated_bw for * the group. * @release_work: Worker to release the @reserved if it is not used by * any of the tunnels. * * Any tunnel that requires isochronous bandwidth (that's DP for now) is * attached to a bandwidth group. All tunnels going through the same * USB4 links share the same group and can dynamically distribute the * bandwidth within the group. */ struct tb_bandwidth_group { struct tb *tb; int index; struct list_head ports; int reserved; struct delayed_work release_work; }; /** * struct tb_port - a thunderbolt port, part of a tb_switch * @config: Cached port configuration read from registers * @sw: Switch the port belongs to * @remote: Remote port (%NULL if not connected) * @xdomain: Remote host (%NULL if not connected) * @cap_phy: Offset, zero if not found * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present) * @cap_adap: Offset of the adapter specific capability (%0 if not present) * @cap_usb4: Offset to the USB4 port capability (%0 if not present) * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0) * @port: Port number on switch * @disabled: Disabled by eeprom or enabled but not implemented * @bonded: true if the port is bonded (two lanes combined as one) * @dual_link_port: If the switch is connected using two ports, points * to the other port. * @link_nr: Is this primary or secondary port on the dual_link. * @in_hopids: Currently allocated input HopIDs * @out_hopids: Currently allocated output HopIDs * @list: Used to link ports to DP resources list * @total_credits: Total number of buffers available for this port * @ctl_credits: Buffers reserved for control path * @dma_credits: Number of credits allocated for DMA tunneling for all * DMA paths through this port. * @group: Bandwidth allocation group the adapter is assigned to. Only * used for DP IN adapters for now. * @group_list: The adapter is linked to the group's list of ports through this * @max_bw: Maximum possible bandwidth through this adapter if set to * non-zero. * @redrive: For DP IN, if true the adapter is in redrive mode. 
* * In USB4 terminology this structure represents an adapter (protocol or * lane adapter). */ struct tb_port { struct tb_regs_port_header config; struct tb_switch *sw; struct tb_port *remote; struct tb_xdomain *xdomain; int cap_phy; int cap_tmu; int cap_adap; int cap_usb4; struct usb4_port *usb4; u8 port; bool disabled; bool bonded; struct tb_port *dual_link_port; u8 link_nr:1; struct ida in_hopids; struct ida out_hopids; struct list_head list; unsigned int total_credits; unsigned int ctl_credits; unsigned int dma_credits; struct tb_bandwidth_group *group; struct list_head group_list; unsigned int max_bw; bool redrive; }; /** * struct usb4_port - USB4 port device * @dev: Device for the port * @port: Pointer to the lane 0 adapter * @can_offline: Does the port have necessary platform support to moved * it into offline mode and back * @offline: The port is currently in offline mode * @margining: Pointer to margining structure if enabled */ struct usb4_port { struct device dev; struct tb_port *port; bool can_offline; bool offline; #ifdef CONFIG_USB4_DEBUGFS_MARGINING struct tb_margining *margining; #endif }; /** * tb_retimer: Thunderbolt retimer * @dev: Device for the retimer * @tb: Pointer to the domain the retimer belongs to * @index: Retimer index facing the router USB4 port * @vendor: Vendor ID of the retimer * @device: Device ID of the retimer * @port: Pointer to the lane 0 adapter * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise) * @no_nvm_upgrade: Prevent NVM upgrade of this retimer * @auth_status: Status of last NVM authentication * @margining: Pointer to margining structure if enabled */ struct tb_retimer { struct device dev; struct tb *tb; u8 index; u32 vendor; u32 device; struct tb_port *port; struct tb_nvm *nvm; bool no_nvm_upgrade; u32 auth_status; #ifdef CONFIG_USB4_DEBUGFS_MARGINING struct tb_margining *margining; #endif }; /** * struct tb_path_hop - routing information for a tb_path * @in_port: Ingress port of a switch * @out_port: Egress port of a switch where the packet is routed out * (must be on the same switch than @in_port) * @in_hop_index: HopID where the path configuration entry is placed in * the path config space of @in_port. * @in_counter_index: Used counter index (not used in the driver * currently, %-1 to disable) * @next_hop_index: HopID of the packet when it is routed out from @out_port * @initial_credits: Number of initial flow control credits allocated for * the path * @nfc_credits: Number of non-flow controlled buffers allocated for the * @in_port. * @pm_support: Set path PM packet support bit to 1 (for USB4 v2 routers) * * Hop configuration is always done on the IN port of a switch. * in_port and out_port have to be on the same switch. Packets arriving on * in_port with "hop" = in_hop_index will get routed to through out_port. The * next hop to take (on out_port->remote) is determined by * next_hop_index. When routing packet to another switch (out->remote is * set) the @next_hop_index must match the @in_hop_index of that next * hop to make routing possible. * * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in * port. 
*/ struct tb_path_hop { struct tb_port *in_port; struct tb_port *out_port; int in_hop_index; int in_counter_index; int next_hop_index; unsigned int initial_credits; unsigned int nfc_credits; bool pm_support; }; /** * enum tb_path_port - path options mask * @TB_PATH_NONE: Do not activate on any hop on path * @TB_PATH_SOURCE: Activate on the first hop (out of src) * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last) * @TB_PATH_DESTINATION: Activate on the last hop (into dst) * @TB_PATH_ALL: Activate on all hops on the path */ enum tb_path_port { TB_PATH_NONE = 0, TB_PATH_SOURCE = 1, TB_PATH_INTERNAL = 2, TB_PATH_DESTINATION = 4, TB_PATH_ALL = 7, }; /** * struct tb_path - a unidirectional path between two ports * @tb: Pointer to the domain structure * @name: Name of the path (used for debugging) * @ingress_shared_buffer: Shared buffering used for ingress ports on the path * @egress_shared_buffer: Shared buffering used for egress ports on the path * @ingress_fc_enable: Flow control for ingress ports on the path * @egress_fc_enable: Flow control for egress ports on the path * @priority: Priority group if the path * @weight: Weight of the path inside the priority group * @drop_packages: Drop packages from queue tail or head * @activated: Is the path active * @clear_fc: Clear all flow control from the path config space entries * when deactivating this path * @hops: Path hops * @path_length: How many hops the path uses * @alloc_hopid: Does this path consume port HopID * * A path consists of a number of hops (see &struct tb_path_hop). To * establish a PCIe tunnel two paths have to be created between the two * PCIe ports. */ struct tb_path { struct tb *tb; const char *name; enum tb_path_port ingress_shared_buffer; enum tb_path_port egress_shared_buffer; enum tb_path_port ingress_fc_enable; enum tb_path_port egress_fc_enable; unsigned int priority:3; int weight:4; bool drop_packages; bool activated; bool clear_fc; struct tb_path_hop *hops; int path_length; bool alloc_hopid; }; /* HopIDs 0-7 are reserved by the Thunderbolt protocol */ #define TB_PATH_MIN_HOPID 8 /* * Support paths from the farthest (depth 6) router to the host and back * to the same level (not necessarily to the same router). */ #define TB_PATH_MAX_HOPS (7 * 2) /* Possible wake types */ #define TB_WAKE_ON_CONNECT BIT(0) #define TB_WAKE_ON_DISCONNECT BIT(1) #define TB_WAKE_ON_USB4 BIT(2) #define TB_WAKE_ON_USB3 BIT(3) #define TB_WAKE_ON_PCIE BIT(4) #define TB_WAKE_ON_DP BIT(5) /* CL states */ #define TB_CL0S BIT(0) #define TB_CL1 BIT(1) #define TB_CL2 BIT(2) /** * struct tb_cm_ops - Connection manager specific operations vector * @driver_ready: Called right after control channel is started. Used by * ICM to send driver ready message to the firmware. * @start: Starts the domain * @stop: Stops the domain * @deinit: Perform any cleanup after the domain is stopped but before * it is unregistered. Called without @tb->lock taken. Optional. 
* @suspend_noirq: Connection manager specific suspend_noirq * @resume_noirq: Connection manager specific resume_noirq * @suspend: Connection manager specific suspend * @freeze_noirq: Connection manager specific freeze_noirq * @thaw_noirq: Connection manager specific thaw_noirq * @complete: Connection manager specific complete * @runtime_suspend: Connection manager specific runtime_suspend * @runtime_resume: Connection manager specific runtime_resume * @runtime_suspend_switch: Runtime suspend a switch * @runtime_resume_switch: Runtime resume a switch * @handle_event: Handle thunderbolt event * @get_boot_acl: Get boot ACL list * @set_boot_acl: Set boot ACL list * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel) * @approve_switch: Approve switch * @add_switch_key: Add key to switch * @challenge_switch_key: Challenge switch using key * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update * @approve_xdomain_paths: Approve (establish) XDomain DMA paths * @disconnect_xdomain_paths: Disconnect XDomain DMA paths * @usb4_switch_op: Optional proxy for USB4 router operations. If set * this will be called whenever USB4 router operation is * performed. If this returns %-EOPNOTSUPP then the * native USB4 router operation is called. * @usb4_switch_nvm_authenticate_status: Optional callback that the CM * implementation can be used to * return status of USB4 NVM_AUTH * router operation. */ struct tb_cm_ops { int (*driver_ready)(struct tb *tb); int (*start)(struct tb *tb, bool reset); void (*stop)(struct tb *tb); void (*deinit)(struct tb *tb); int (*suspend_noirq)(struct tb *tb); int (*resume_noirq)(struct tb *tb); int (*suspend)(struct tb *tb); int (*freeze_noirq)(struct tb *tb); int (*thaw_noirq)(struct tb *tb); void (*complete)(struct tb *tb); int (*runtime_suspend)(struct tb *tb); int (*runtime_resume)(struct tb *tb); int (*runtime_suspend_switch)(struct tb_switch *sw); int (*runtime_resume_switch)(struct tb_switch *sw); void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type, const void *buf, size_t size); int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids); int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids); int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw); int (*approve_switch)(struct tb *tb, struct tb_switch *sw); int (*add_switch_key)(struct tb *tb, struct tb_switch *sw); int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw, const u8 *challenge, u8 *response); int (*disconnect_pcie_paths)(struct tb *tb); int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring); int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring); int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata, u8 *status, const void *tx_data, size_t tx_data_len, void *rx_data, size_t rx_data_len); int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw, u32 *status); }; static inline void *tb_priv(struct tb *tb) { return (void *)tb->privdata; } #define TB_AUTOSUSPEND_DELAY 15000 /* ms */ /* helper functions & macros */ /** * tb_upstream_port() - return the upstream port of a switch * * Every switch has an upstream port (for the root switch it is the NHI). * * During switch alloc/init tb_upstream_port()->remote may be NULL, even for * non root switches (on the NHI port remote is always NULL). * * Return: Returns the upstream port of the switch. 
*/ static inline struct tb_port *tb_upstream_port(struct tb_switch *sw) { return &sw->ports[sw->config.upstream_port_number]; } /** * tb_is_upstream_port() - Is the port upstream facing * @port: Port to check * * Returns true if @port is upstream facing port. In case of dual link * ports both return true. */ static inline bool tb_is_upstream_port(const struct tb_port *port) { const struct tb_port *upstream_port = tb_upstream_port(port->sw); return port == upstream_port || port->dual_link_port == upstream_port; } static inline u64 tb_route(const struct tb_switch *sw) { return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo; } static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw) { u8 port; port = route >> (sw->config.depth * 8); if (WARN_ON(port > sw->config.max_port_number)) return NULL; return &sw->ports[port]; } static inline const char *tb_width_name(enum tb_link_width width) { switch (width) { case TB_LINK_WIDTH_SINGLE: return "symmetric, single lane"; case TB_LINK_WIDTH_DUAL: return "symmetric, dual lanes"; case TB_LINK_WIDTH_ASYM_TX: return "asymmetric, 3 transmitters, 1 receiver"; case TB_LINK_WIDTH_ASYM_RX: return "asymmetric, 3 receivers, 1 transmitter"; default: return "unknown"; } } /** * tb_port_has_remote() - Does the port have switch connected downstream * @port: Port to check * * Returns true only when the port is primary port and has remote set. */ static inline bool tb_port_has_remote(const struct tb_port *port) { if (tb_is_upstream_port(port)) return false; if (!port->remote) return false; if (port->dual_link_port && port->link_nr) return false; return true; } static inline bool tb_port_is_null(const struct tb_port *port) { return port && port->port && port->config.type == TB_TYPE_PORT; } static inline bool tb_port_is_nhi(const struct tb_port *port) { return port && port->config.type == TB_TYPE_NHI; } static inline bool tb_port_is_pcie_down(const struct tb_port *port) { return port && port->config.type == TB_TYPE_PCIE_DOWN; } static inline bool tb_port_is_pcie_up(const struct tb_port *port) { return port && port->config.type == TB_TYPE_PCIE_UP; } static inline bool tb_port_is_dpin(const struct tb_port *port) { return port && port->config.type == TB_TYPE_DP_HDMI_IN; } static inline bool tb_port_is_dpout(const struct tb_port *port) { return port && port->config.type == TB_TYPE_DP_HDMI_OUT; } static inline bool tb_port_is_usb3_down(const struct tb_port *port) { return port && port->config.type == TB_TYPE_USB3_DOWN; } static inline bool tb_port_is_usb3_up(const struct tb_port *port) { return port && port->config.type == TB_TYPE_USB3_UP; } static inline int tb_sw_read(struct tb_switch *sw, void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (sw->is_unplugged) return -ENODEV; return tb_cfg_read(sw->tb->ctl, buffer, tb_route(sw), 0, space, offset, length); } static inline int tb_sw_write(struct tb_switch *sw, const void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (sw->is_unplugged) return -ENODEV; return tb_cfg_write(sw->tb->ctl, buffer, tb_route(sw), 0, space, offset, length); } static inline int tb_port_read(struct tb_port *port, void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (port->sw->is_unplugged) return -ENODEV; return tb_cfg_read(port->sw->tb->ctl, buffer, tb_route(port->sw), port->port, space, offset, length); } static inline int tb_port_write(struct tb_port *port, const void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { if (port->sw->is_unplugged) return -ENODEV; return 
tb_cfg_write(port->sw->tb->ctl, buffer, tb_route(port->sw), port->port, space, offset, length); } #define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg) #define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg) #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg) #define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg) #define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg) #define __TB_SW_PRINT(level, sw, fmt, arg...) \ do { \ const struct tb_switch *__sw = (sw); \ level(__sw->tb, "%llx: " fmt, \ tb_route(__sw), ## arg); \ } while (0) #define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg) #define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg) #define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg) #define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg) #define __TB_PORT_PRINT(level, _port, fmt, arg...) \ do { \ const struct tb_port *__port = (_port); \ level(__port->sw->tb, "%llx:%u: " fmt, \ tb_route(__port->sw), __port->port, ## arg); \ } while (0) #define tb_port_WARN(port, fmt, arg...) \ __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg) #define tb_port_warn(port, fmt, arg...) \ __TB_PORT_PRINT(tb_warn, port, fmt, ##arg) #define tb_port_info(port, fmt, arg...) \ __TB_PORT_PRINT(tb_info, port, fmt, ##arg) #define tb_port_dbg(port, fmt, arg...) \ __TB_PORT_PRINT(tb_dbg, port, fmt, ##arg) struct tb *icm_probe(struct tb_nhi *nhi); struct tb *tb_probe(struct tb_nhi *nhi); extern const struct device_type tb_domain_type; extern const struct device_type tb_retimer_type; extern const struct device_type tb_switch_type; extern const struct device_type usb4_port_device_type; int tb_domain_init(void); void tb_domain_exit(void); int tb_xdomain_init(void); void tb_xdomain_exit(void); struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize); int tb_domain_add(struct tb *tb, bool reset); void tb_domain_remove(struct tb *tb); int tb_domain_suspend_noirq(struct tb *tb); int tb_domain_resume_noirq(struct tb *tb); int tb_domain_suspend(struct tb *tb); int tb_domain_freeze_noirq(struct tb *tb); int tb_domain_thaw_noirq(struct tb *tb); void tb_domain_complete(struct tb *tb); int tb_domain_runtime_suspend(struct tb *tb); int tb_domain_runtime_resume(struct tb *tb); int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw); int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw); int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw); int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw); int tb_domain_disconnect_pcie_paths(struct tb *tb); int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring); int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, int transmit_path, int transmit_ring, int receive_path, int receive_ring); int tb_domain_disconnect_all_paths(struct tb *tb); static inline struct tb *tb_domain_get(struct tb *tb) { if (tb) get_device(&tb->dev); return tb; } static inline void tb_domain_put(struct tb *tb) { put_device(&tb->dev); } struct tb_nvm *tb_nvm_alloc(struct device *dev); int tb_nvm_read_version(struct tb_nvm *nvm); int tb_nvm_validate(struct tb_nvm *nvm); int tb_nvm_write_headers(struct tb_nvm *nvm); int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read); int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int 
offset, void *val, size_t bytes); int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write); void tb_nvm_free(struct tb_nvm *nvm); void tb_nvm_exit(void); typedef int (*read_block_fn)(void *, unsigned int, void *, size_t); typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t); int tb_nvm_read_data(unsigned int address, void *buf, size_t size, unsigned int retries, read_block_fn read_block, void *read_block_data); int tb_nvm_write_data(unsigned int address, const void *buf, size_t size, unsigned int retries, write_block_fn write_next_block, void *write_block_data); int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, u64 route); struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route); int tb_switch_configure(struct tb_switch *sw); int tb_switch_configuration_valid(struct tb_switch *sw); int tb_switch_add(struct tb_switch *sw); void tb_switch_remove(struct tb_switch *sw); void tb_switch_suspend(struct tb_switch *sw, bool runtime); int tb_switch_resume(struct tb_switch *sw, bool runtime); int tb_switch_reset(struct tb_switch *sw); int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, u32 value, int timeout_msec); void tb_sw_set_unplugged(struct tb_switch *sw); struct tb_port *tb_switch_find_port(struct tb_switch *sw, enum tb_port_type type); struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth); struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route); /** * tb_switch_for_each_port() - Iterate over each switch port * @sw: Switch whose ports to iterate * @p: Port used as iterator * * Iterates over each switch port skipping the control port (port %0). */ #define tb_switch_for_each_port(sw, p) \ for ((p) = &(sw)->ports[1]; \ (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++) static inline struct tb_switch *tb_switch_get(struct tb_switch *sw) { if (sw) get_device(&sw->dev); return sw; } static inline void tb_switch_put(struct tb_switch *sw) { put_device(&sw->dev); } static inline bool tb_is_switch(const struct device *dev) { return dev->type == &tb_switch_type; } static inline struct tb_switch *tb_to_switch(const struct device *dev) { if (tb_is_switch(dev)) return container_of(dev, struct tb_switch, dev); return NULL; } static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) { return tb_to_switch(sw->dev.parent); } /** * tb_switch_downstream_port() - Return downstream facing port of parent router * @sw: Device router pointer * * Only call for device routers. Returns the downstream facing port of * the parent router. 
*/ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw) { if (WARN_ON(!tb_route(sw))) return NULL; return tb_port_at(tb_route(sw), tb_switch_parent(sw)); } /** * tb_switch_depth() - Returns depth of the connected router * @sw: Router */ static inline int tb_switch_depth(const struct tb_switch *sw) { return sw->config.depth; } static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) { return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; } static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw) { return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; } static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: return true; } } return false; } static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: return true; } } return false; } static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: return true; } } return false; } static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: return true; } } return false; } static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw) { if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { switch (sw->config.device_id) { case PCI_DEVICE_ID_INTEL_TGL_NHI0: case PCI_DEVICE_ID_INTEL_TGL_NHI1: case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: return true; } } return false; } /** * tb_switch_is_icm() - Is the switch handled by ICM firmware * @sw: Switch to check * * In case there is a need to differentiate whether ICM firmware or SW CM * is handling @sw this function can be called. It is valid to call this * after tb_switch_alloc() and tb_switch_configure() has been called * (latter only for SW CM case). 
*/ static inline bool tb_switch_is_icm(const struct tb_switch *sw) { return !sw->config.enabled; } int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width); int tb_switch_configure_link(struct tb_switch *sw); void tb_switch_unconfigure_link(struct tb_switch *sw); bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); int tb_switch_tmu_init(struct tb_switch *sw); int tb_switch_tmu_post_time(struct tb_switch *sw); int tb_switch_tmu_disable(struct tb_switch *sw); int tb_switch_tmu_enable(struct tb_switch *sw); int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode); /** * tb_switch_tmu_is_configured() - Is given TMU mode configured * @sw: Router whose mode to check * @mode: Mode to check * * Checks if given router TMU mode is configured to @mode. Note the * router TMU might not be enabled to this mode. */ static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw, enum tb_switch_tmu_mode mode) { return sw->tmu.mode_request == mode; } /** * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled * @sw: Router whose TMU mode to check * * Return true if hardware TMU configuration matches the requested * configuration (and is not %TB_SWITCH_TMU_MODE_OFF). */ static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) { return sw->tmu.mode != TB_SWITCH_TMU_MODE_OFF && sw->tmu.mode == sw->tmu.mode_request; } bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx); int tb_switch_clx_init(struct tb_switch *sw); int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx); int tb_switch_clx_disable(struct tb_switch *sw); /** * tb_switch_clx_is_enabled() - Checks if the CLx is enabled * @sw: Router to check for the CLx * @clx: The CLx states to check for * * Checks if the specified CLx is enabled on the router upstream link. * Returns true if any of the given states is enabled. * * Not applicable for a host router. */ static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw, unsigned int clx) { return sw->clx & clx; } int tb_switch_pcie_l1_enable(struct tb_switch *sw); int tb_switch_xhci_connect(struct tb_switch *sw); void tb_switch_xhci_disconnect(struct tb_switch *sw); int tb_port_state(struct tb_port *port); int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_clear_counter(struct tb_port *port, int counter); int tb_port_unlock(struct tb_port *port); int tb_port_enable(struct tb_port *port); int tb_port_disable(struct tb_port *port); int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid); void tb_port_release_in_hopid(struct tb_port *port, int hopid); int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid); void tb_port_release_out_hopid(struct tb_port *port, int hopid); struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, struct tb_port *prev); /** * tb_port_path_direction_downstream() - Checks if path directed downstream * @src: Source adapter * @dst: Destination adapter * * Returns %true only if the specified path from source adapter (@src) * to destination adapter (@dst) is directed downstream. 
*/ static inline bool tb_port_path_direction_downstream(const struct tb_port *src, const struct tb_port *dst) { return src->sw->config.depth < dst->sw->config.depth; } static inline bool tb_port_use_credit_allocation(const struct tb_port *port) { return tb_port_is_null(port) && port->sw->credit_allocation; } /** * tb_for_each_port_on_path() - Iterate over each port on path * @src: Source port * @dst: Destination port * @p: Port used as iterator * * Walks over each port on path from @src to @dst. */ #define tb_for_each_port_on_path(src, dst, p) \ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ (p) = tb_next_port_on_path((src), (dst), (p))) /** * tb_for_each_upstream_port_on_path() - Iterate over each upstreamm port on path * @src: Source port * @dst: Destination port * @p: Port used as iterator * * Walks over each upstream lane adapter on path from @src to @dst. */ #define tb_for_each_upstream_port_on_path(src, dst, p) \ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ (p) = tb_next_port_on_path((src), (dst), (p))) \ if (!tb_port_is_null((p)) || !tb_is_upstream_port((p))) {\ continue; \ } else int tb_port_get_link_speed(struct tb_port *port); int tb_port_get_link_generation(struct tb_port *port); int tb_port_get_link_width(struct tb_port *port); bool tb_port_width_supported(struct tb_port *port, unsigned int width); int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width); int tb_port_lane_bonding_enable(struct tb_port *port); void tb_port_lane_bonding_disable(struct tb_port *port); int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width, int timeout_msec); int tb_port_update_credits(struct tb_port *port); int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset); int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); int tb_port_next_cap(struct tb_port *port, unsigned int offset); bool tb_port_is_enabled(struct tb_port *port); bool tb_usb3_port_is_enabled(struct tb_port *port); int tb_usb3_port_enable(struct tb_port *port, bool enable); bool tb_pci_port_is_enabled(struct tb_port *port); int tb_pci_port_enable(struct tb_port *port, bool enable); int tb_dp_port_hpd_is_active(struct tb_port *port); int tb_dp_port_hpd_clear(struct tb_port *port); int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, unsigned int aux_tx, unsigned int aux_rx); bool tb_dp_port_is_enabled(struct tb_port *port); int tb_dp_port_enable(struct tb_port *port, bool enable); struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, struct tb_port *dst, int dst_hopid, struct tb_port **last, const char *name, bool alloc_hopid); struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, struct tb_port *dst, int dst_hopid, int link_nr, const char *name); void tb_path_free(struct tb_path *path); int tb_path_activate(struct tb_path *path); void tb_path_deactivate(struct tb_path *path); int tb_path_deactivate_hop(struct tb_port *port, int hop_index); bool tb_path_is_invalid(struct tb_path *path); bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port); /** * tb_path_for_each_hop() - Iterate over each hop on path * @path: Path whose hops to iterate * @hop: Hop used as iterator * * Iterates over each hop on path. 
*/ #define tb_path_for_each_hop(path, hop) \ for ((hop) = &(path)->hops[0]; \ (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++) int tb_drom_read(struct tb_switch *sw); int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); int tb_lc_reset_port(struct tb_port *port); int tb_lc_configure_port(struct tb_port *port); void tb_lc_unconfigure_port(struct tb_port *port); int tb_lc_configure_xdomain(struct tb_port *port); void tb_lc_unconfigure_xdomain(struct tb_port *port); int tb_lc_start_lane_initialization(struct tb_port *port); bool tb_lc_is_clx_supported(struct tb_port *port); bool tb_lc_is_usb_plugged(struct tb_port *port); bool tb_lc_is_xhci_connected(struct tb_port *port); int tb_lc_xhci_connect(struct tb_port *port); void tb_lc_xhci_disconnect(struct tb_port *port); int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags); int tb_lc_set_sleep(struct tb_switch *sw); bool tb_lc_lane_bonding_possible(struct tb_switch *sw); bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in); int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in); int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in); int tb_lc_force_power(struct tb_switch *sw); static inline int tb_route_length(u64 route) { return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT; } /** * tb_downstream_route() - get route to downstream switch * * Port must not be the upstream port (otherwise a loop is created). * * Return: Returns a route to the switch behind @port. */ static inline u64 tb_downstream_route(struct tb_port *port) { return tb_route(port->sw) | ((u64) port->port << (port->sw->config.depth * 8)); } bool tb_is_xdomain_enabled(void); bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type, const void *buf, size_t size); struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, u64 route, const uuid_t *local_uuid, const uuid_t *remote_uuid); void tb_xdomain_add(struct tb_xdomain *xd); void tb_xdomain_remove(struct tb_xdomain *xd); struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, u8 depth); static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd) { return tb_to_switch(xd->dev.parent); } /** * tb_xdomain_downstream_port() - Return downstream facing port of parent router * @xd: Xdomain pointer * * Returns the downstream port the XDomain is connected to. */ static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd) { return tb_port_at(xd->route, tb_xdomain_parent(xd)); } int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf, size_t size); int tb_retimer_scan(struct tb_port *port, bool add); void tb_retimer_remove_all(struct tb_port *port); static inline bool tb_is_retimer(const struct device *dev) { return dev->type == &tb_retimer_type; } static inline struct tb_retimer *tb_to_retimer(struct device *dev) { if (tb_is_retimer(dev)) return container_of(dev, struct tb_retimer, dev); return NULL; } /** * usb4_switch_version() - Returns USB4 version of the router * @sw: Router to check * * Returns major version of USB4 router (%1 for v1, %2 for v2 and so * on). Can be called to pre-USB4 router too and in that case returns %0. 
*/ static inline unsigned int usb4_switch_version(const struct tb_switch *sw) { return FIELD_GET(USB4_VERSION_MAJOR_MASK, sw->config.thunderbolt_version); } /** * tb_switch_is_usb4() - Is the switch USB4 compliant * @sw: Switch to check * * Returns true if the @sw is USB4 compliant router, false otherwise. */ static inline bool tb_switch_is_usb4(const struct tb_switch *sw) { return usb4_switch_version(sw) > 0; } void usb4_switch_check_wakes(struct tb_switch *sw); int usb4_switch_setup(struct tb_switch *sw); int usb4_switch_configuration_valid(struct tb_switch *sw); int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags); int usb4_switch_set_sleep(struct tb_switch *sw); int usb4_switch_nvm_sector_size(struct tb_switch *sw); int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address); int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, const void *buf, size_t size); int usb4_switch_nvm_authenticate(struct tb_switch *sw); int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status); int usb4_switch_credits_init(struct tb_switch *sw); bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, const struct tb_port *port); struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port); int usb4_switch_add_ports(struct tb_switch *sw); void usb4_switch_remove_ports(struct tb_switch *sw); int usb4_port_unlock(struct tb_port *port); int usb4_port_hotplug_enable(struct tb_port *port); int usb4_port_reset(struct tb_port *port); int usb4_port_configure(struct tb_port *port); void usb4_port_unconfigure(struct tb_port *port); int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd); void usb4_port_unconfigure_xdomain(struct tb_port *port); int usb4_port_router_offline(struct tb_port *port); int usb4_port_router_online(struct tb_port *port); int usb4_port_enumerate_retimers(struct tb_port *port); bool usb4_port_clx_supported(struct tb_port *port); bool usb4_port_asym_supported(struct tb_port *port); int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width); int usb4_port_asym_start(struct tb_port *port); /** * enum tb_sb_target - Sideband transaction target * @USB4_SB_TARGET_ROUTER: Target is the router itself * @USB4_SB_TARGET_PARTNER: Target is partner * @USB4_SB_TARGET_RETIMER: Target is retimer */ enum usb4_sb_target { USB4_SB_TARGET_ROUTER, USB4_SB_TARGET_PARTNER, USB4_SB_TARGET_RETIMER, }; int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index, u8 reg, void *buf, u8 size); int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, u8 index, u8 reg, const void *buf, u8 size); /** * enum usb4_margin_sw_error_counter - Software margining error counter operation * @USB4_MARGIN_SW_ERROR_COUNTER_NOP: No change in counter setup * @USB4_MARGIN_SW_ERROR_COUNTER_CLEAR: Set the error counter to 0, enable counter * @USB4_MARGIN_SW_ERROR_COUNTER_START: Start counter, count from last value * 
@USB4_MARGIN_SW_ERROR_COUNTER_STOP: Stop counter, do not clear value */ enum usb4_margin_sw_error_counter { USB4_MARGIN_SW_ERROR_COUNTER_NOP, USB4_MARGIN_SW_ERROR_COUNTER_CLEAR, USB4_MARGIN_SW_ERROR_COUNTER_START, USB4_MARGIN_SW_ERROR_COUNTER_STOP, }; enum usb4_margining_lane { USB4_MARGINING_LANE_RX0 = 0, USB4_MARGINING_LANE_RX1 = 1, USB4_MARGINING_LANE_RX2 = 2, USB4_MARGINING_LANE_ALL = 7, }; /** * struct usb4_port_margining_params - USB4 margining parameters * @error_counter: Error counter operation for software margining * @ber_level: Current BER level contour value * @lanes: Lanes to enable for the margining operation * @voltage_time_offset: Offset for voltage / time for software margining * @optional_voltage_offset_range: Enable optional extended voltage range * @right_high: %false if left/low margin test is performed, %true if right/high * @time: %true if time margining is used instead of voltage */ struct usb4_port_margining_params { enum usb4_margin_sw_error_counter error_counter; u32 ber_level; enum usb4_margining_lane lanes; u32 voltage_time_offset; bool optional_voltage_offset_range; bool right_high; bool upper_eye; bool time; }; int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target, u8 index, u32 *caps, size_t ncaps); int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target, u8 index, const struct usb4_port_margining_params *params, u32 *results, size_t nresults); int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target, u8 index, const struct usb4_port_margining_params *params, u32 *results); int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target, u8 index, u32 *errors); int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index); int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index); int usb4_port_retimer_is_last(struct tb_port *port, u8 index); int usb4_port_retimer_is_cable(struct tb_port *port, u8 index); int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index); int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, unsigned int address); int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address, const void *buf, size_t size); int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index); int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, u32 *status); int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, unsigned int address, void *buf, size_t size); int usb4_usb3_port_max_link_rate(struct tb_port *port); int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw, int *downstream_bw); int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, int *downstream_bw); int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, int *downstream_bw); int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id); bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port); bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port); int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port, bool supported); int usb4_dp_port_group_id(struct tb_port *port); int usb4_dp_port_set_group_id(struct tb_port *port, int group_id); int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes); int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes); int usb4_dp_port_granularity(struct tb_port *port); int usb4_dp_port_set_granularity(struct tb_port *port, int granularity); int 
usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw); int usb4_dp_port_allocated_bandwidth(struct tb_port *port); int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw); int usb4_dp_port_requested_bandwidth(struct tb_port *port); int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable); static inline bool tb_is_usb4_port_device(const struct device *dev) { return dev->type == &usb4_port_device_type; } static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev) { if (tb_is_usb4_port_device(dev)) return container_of(dev, struct usb4_port, dev); return NULL; } struct usb4_port *usb4_port_device_add(struct tb_port *port); void usb4_port_device_remove(struct usb4_port *usb4); int usb4_port_device_resume(struct usb4_port *usb4); static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4) { return usb4->offline; } void tb_check_quirks(struct tb_switch *sw); #ifdef CONFIG_ACPI bool tb_acpi_add_links(struct tb_nhi *nhi); bool tb_acpi_is_native(void); bool tb_acpi_may_tunnel_usb3(void); bool tb_acpi_may_tunnel_dp(void); bool tb_acpi_may_tunnel_pcie(void); bool tb_acpi_is_xdomain_allowed(void); int tb_acpi_init(void); void tb_acpi_exit(void); int tb_acpi_power_on_retimers(struct tb_port *port); int tb_acpi_power_off_retimers(struct tb_port *port); #else static inline bool tb_acpi_add_links(struct tb_nhi *nhi) { return false; } static inline bool tb_acpi_is_native(void) { return true; } static inline bool tb_acpi_may_tunnel_usb3(void) { return true; } static inline bool tb_acpi_may_tunnel_dp(void) { return true; } static inline bool tb_acpi_may_tunnel_pcie(void) { return true; } static inline bool tb_acpi_is_xdomain_allowed(void) { return true; } static inline int tb_acpi_init(void) { return 0; } static inline void tb_acpi_exit(void) { } static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; } static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; } #endif #ifdef CONFIG_DEBUG_FS void tb_debugfs_init(void); void tb_debugfs_exit(void); void tb_switch_debugfs_init(struct tb_switch *sw); void tb_switch_debugfs_remove(struct tb_switch *sw); void tb_xdomain_debugfs_init(struct tb_xdomain *xd); void tb_xdomain_debugfs_remove(struct tb_xdomain *xd); void tb_service_debugfs_init(struct tb_service *svc); void tb_service_debugfs_remove(struct tb_service *svc); void tb_retimer_debugfs_init(struct tb_retimer *rt); void tb_retimer_debugfs_remove(struct tb_retimer *rt); #else static inline void tb_debugfs_init(void) { } static inline void tb_debugfs_exit(void) { } static inline void tb_switch_debugfs_init(struct tb_switch *sw) { } static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { } static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { } static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { } static inline void tb_service_debugfs_init(struct tb_service *svc) { } static inline void tb_service_debugfs_remove(struct tb_service *svc) { } static inline void tb_retimer_debugfs_init(struct tb_retimer *rt) { } static inline void tb_retimer_debugfs_remove(struct tb_retimer *rt) { } #endif #endif
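/*
 * Illustrative sketch, not part of the header above: how the routing helpers
 * compose. A route string packs one port number per hop, eight bits per
 * depth level, so the downstream facing port on the parent router extends
 * the parent's route by exactly one byte. The helper name below is
 * hypothetical; it only demonstrates the intended use of tb_route(),
 * tb_switch_downstream_port() and tb_downstream_route().
 */
static inline bool tb_example_route_is_consistent(struct tb_switch *sw)
{
	struct tb_port *down;

	/* The host router has route 0 and no parent to walk back to */
	if (!tb_route(sw))
		return true;

	/* Downstream facing port on the parent that leads to @sw */
	down = tb_switch_downstream_port(sw);
	if (!down)
		return false;

	/* Appending @down->port to the parent's route yields @sw's route */
	return tb_downstream_route(down) == tb_route(sw);
}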
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Patrick McHardy <kaber@trash.net> */ #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_SYNPROXY.h> #include <net/netfilter/nf_synproxy.h> static unsigned int synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_synproxy_info *info = par->targinfo; struct net *net = xt_net(par); struct synproxy_net *snet = synproxy_pernet(net); struct synproxy_options opts = {}; struct tcphdr *th, _th; if (nf_ip6_checksum(skb, xt_hooknum(par), par->thoff, IPPROTO_TCP)) return NF_DROP; th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th); if (th == NULL) return NF_DROP; if (!synproxy_parse_options(skb, par->thoff, th, &opts)) return NF_DROP; if (th->syn && !(th->ack || th->fin || th->rst)) { /* Initial SYN from client */ this_cpu_inc(snet->stats->syn_received); if (th->ece && th->cwr) opts.options |= XT_SYNPROXY_OPT_ECN; opts.options &= info->options; opts.mss_encode = opts.mss_option; opts.mss_option = info->mss; if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP) synproxy_init_timestamp_cookie(info, &opts); else opts.options &= ~(XT_SYNPROXY_OPT_WSCALE | XT_SYNPROXY_OPT_SACK_PERM | XT_SYNPROXY_OPT_ECN); synproxy_send_client_synack_ipv6(net, skb, th, &opts); consume_skb(skb); return NF_STOLEN; } else if (th->ack && !(th->fin || th->rst || th->syn)) { /* ACK from client */ if (synproxy_recv_client_ack_ipv6(net, skb, th, &opts, ntohl(th->seq))) { consume_skb(skb); return NF_STOLEN; } else { return NF_DROP; } } return XT_CONTINUE; } static int synproxy_tg6_check(const struct xt_tgchk_param *par) { struct synproxy_net *snet = synproxy_pernet(par->net); const struct ip6t_entry *e = par->entryinfo; int err; if (!(e->ipv6.flags & IP6T_F_PROTO) || e->ipv6.proto != IPPROTO_TCP || e->ipv6.invflags & XT_INV_PROTO) return -EINVAL; err = nf_ct_netns_get(par->net, par->family); if (err) return err; err = nf_synproxy_ipv6_init(snet, par->net); if (err) { nf_ct_netns_put(par->net, par->family); return err; } return err; } static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par) { struct synproxy_net *snet = synproxy_pernet(par->net); nf_synproxy_ipv6_fini(snet, par->net); nf_ct_netns_put(par->net, par->family); } static struct xt_target synproxy_tg6_reg __read_mostly = { .name = "SYNPROXY", .family = NFPROTO_IPV6, .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD), .target = synproxy_tg6, .targetsize = sizeof(struct xt_synproxy_info), .checkentry = synproxy_tg6_check, .destroy = synproxy_tg6_destroy, .me = THIS_MODULE, }; static int __init synproxy_tg6_init(void) { return xt_register_target(&synproxy_tg6_reg); } static void __exit synproxy_tg6_exit(void) { xt_unregister_target(&synproxy_tg6_reg); } module_init(synproxy_tg6_init); module_exit(synproxy_tg6_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("Intercept IPv6 TCP connections and establish them using syncookies");
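/*
 * Illustrative sketch, not part of the module above: synproxy_tg6() keys
 * purely off the TCP flag combination. This hypothetical helper restates
 * that three-way split: a bare SYN is answered with a cookie SYN/ACK, a
 * bare ACK is validated as a cookie echo, anything else is left to the
 * rest of the ruleset (XT_CONTINUE). struct tcphdr comes from <linux/tcp.h>.
 */
enum synproxy_example_class {
	SYNPROXY_EXAMPLE_SYN,	/* initial SYN from the client */
	SYNPROXY_EXAMPLE_ACK,	/* ACK completing the client handshake */
	SYNPROXY_EXAMPLE_PASS,	/* not handled by the target */
};

static inline enum synproxy_example_class
synproxy_example_classify(const struct tcphdr *th)
{
	if (th->syn && !(th->ack || th->fin || th->rst))
		return SYNPROXY_EXAMPLE_SYN;
	if (th->ack && !(th->fin || th->rst || th->syn))
		return SYNPROXY_EXAMPLE_ACK;
	return SYNPROXY_EXAMPLE_PASS;
}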
// SPDX-License-Identifier: GPL-2.0-or-later /* * xpress_decompress.c - A decompressor for the XPRESS compression format * (Huffman variant), which can be used in "System Compressed" files. This is * based on the code from wimlib. * * Copyright (C) 2015 Eric Biggers */ #include "decompress_common.h" #include "lib.h" #define XPRESS_NUM_SYMBOLS 512 #define XPRESS_MAX_CODEWORD_LEN 15 #define XPRESS_MIN_MATCH_LEN 3 /* This value is chosen for fast decompression. */ #define XPRESS_TABLEBITS 12 /* Reusable heap-allocated memory for XPRESS decompression */ struct xpress_decompressor { /* The Huffman decoding table */ u16 decode_table[(1 << XPRESS_TABLEBITS) + 2 * XPRESS_NUM_SYMBOLS]; /* An array that maps symbols to codeword lengths */ u8 lens[XPRESS_NUM_SYMBOLS]; /* Temporary space for make_huffman_decode_table() */ u16 working_space[2 * (1 + XPRESS_MAX_CODEWORD_LEN) + XPRESS_NUM_SYMBOLS]; }; /* * xpress_allocate_decompressor - Allocate an XPRESS decompressor * * Return the pointer to the decompressor on success, or return NULL and set * errno on failure. */ struct xpress_decompressor *xpress_allocate_decompressor(void) { return kmalloc(sizeof(struct xpress_decompressor), GFP_NOFS); } /* * xpress_decompress - Decompress a buffer of XPRESS-compressed data * * @decompressor: A decompressor that was allocated with * xpress_allocate_decompressor() * @compressed_data: The buffer of data to decompress * @compressed_size: Number of bytes of compressed data * @uncompressed_data: The buffer in which to store the decompressed data * @uncompressed_size: The number of bytes the data decompresses into * * Return 0 on success, or return -1 and set errno on failure. */ int xpress_decompress(struct xpress_decompressor *decompressor, const void *compressed_data, size_t compressed_size, void *uncompressed_data, size_t uncompressed_size) { struct xpress_decompressor *d = decompressor; const u8 * const in_begin = compressed_data; u8 * const out_begin = uncompressed_data; u8 *out_next = out_begin; u8 * const out_end = out_begin + uncompressed_size; struct input_bitstream is; u32 i; /* Read the Huffman codeword lengths. */ if (compressed_size < XPRESS_NUM_SYMBOLS / 2) goto invalid; for (i = 0; i < XPRESS_NUM_SYMBOLS / 2; i++) { d->lens[i*2 + 0] = in_begin[i] & 0xF; d->lens[i*2 + 1] = in_begin[i] >> 4; } /* Build a decoding table for the Huffman code. */ if (make_huffman_decode_table(d->decode_table, XPRESS_NUM_SYMBOLS, XPRESS_TABLEBITS, d->lens, XPRESS_MAX_CODEWORD_LEN, d->working_space)) goto invalid; /* Decode the matches and literals.
*/ init_input_bitstream(&is, in_begin + XPRESS_NUM_SYMBOLS / 2, compressed_size - XPRESS_NUM_SYMBOLS / 2); while (out_next != out_end) { u32 sym; u32 log2_offset; u32 length; u32 offset; sym = read_huffsym(&is, d->decode_table, XPRESS_TABLEBITS, XPRESS_MAX_CODEWORD_LEN); if (sym < 256) { /* Literal */ *out_next++ = sym; } else { /* Match */ length = sym & 0xf; log2_offset = (sym >> 4) & 0xf; bitstream_ensure_bits(&is, 16); offset = ((u32)1 << log2_offset) | bitstream_pop_bits(&is, log2_offset); if (length == 0xf) { length += bitstream_read_byte(&is); if (length == 0xf + 0xff) length = bitstream_read_u16(&is); } length += XPRESS_MIN_MATCH_LEN; if (offset > (size_t)(out_next - out_begin)) goto invalid; if (length > (size_t)(out_end - out_next)) goto invalid; out_next = lz_copy(out_next, length, offset, out_end, XPRESS_MIN_MATCH_LEN); } } return 0; invalid: return -1; } /* * xpress_free_decompressor - Free an XPRESS decompressor * * @decompressor: A decompressor that was allocated with * xpress_allocate_decompressor(), or NULL. */ void xpress_free_decompressor(struct xpress_decompressor *decompressor) { kfree(decompressor); }
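/*
 * Illustrative sketch, not part of the file above: the expected calling
 * sequence for the three exported functions. The wrapper and its buffer
 * parameters are hypothetical; error codes assume <linux/errno.h>.
 */
static int xpress_decompress_example(const void *cdata, size_t csize,
				     void *udata, size_t usize)
{
	struct xpress_decompressor *d = xpress_allocate_decompressor();
	int ret;

	if (!d)
		return -ENOMEM;

	/* xpress_decompress() returns 0 on success, -1 on corrupt input */
	ret = xpress_decompress(d, cdata, csize, udata, usize);

	xpress_free_decompressor(d);
	return ret ? -EINVAL : 0;
}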
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /** * file phonet.h * * Phonet sockets kernel interface * * Copyright (C) 2008 Nokia Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #ifndef _UAPILINUX_PHONET_H #define _UAPILINUX_PHONET_H #include <linux/types.h> #include <linux/socket.h> /* Automatic protocol selection */ #define PN_PROTO_TRANSPORT 0 /* Phonet datagram socket */ #define PN_PROTO_PHONET 1 /* Phonet pipe */ #define PN_PROTO_PIPE 2 #define PHONET_NPROTO 3 /* Socket options for SOL_PNPIPE level */ #define PNPIPE_ENCAP 1 #define PNPIPE_IFINDEX 2 #define PNPIPE_HANDLE 3 #define PNPIPE_INITSTATE 4 #define PNADDR_ANY 0 #define PNADDR_BROADCAST 0xFC #define PNPORT_RESOURCE_ROUTING 0 /* Values for PNPIPE_ENCAP option */ #define PNPIPE_ENCAP_NONE 0 #define PNPIPE_ENCAP_IP 1 /* ioctls */ #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) #define SIOCPNENABLEPIPE (SIOCPROTOPRIVATE + 13) #define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) #define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) /* Phonet protocol header */ struct phonethdr { __u8 pn_rdev; __u8 pn_sdev; __u8 pn_res; __be16 pn_length; __u8 pn_robj; __u8 pn_sobj; } __attribute__((packed)); /* Common Phonet payload header */ struct phonetmsg { __u8 pn_trans_id; /* transaction ID */ __u8 pn_msg_id; /* message type */ union { struct { __u8 pn_submsg_id; /* message subtype */ __u8 pn_data[5]; } base; struct { __u16 pn_e_res_id; /* extended resource ID */ __u8 pn_e_submsg_id; /* message subtype */ __u8 pn_e_data[3]; } ext; } pn_msg_u; }; #define PN_COMMON_MESSAGE 0xF0 #define PN_COMMGR 0x10 #define PN_PREFIX 0xE0 /* resource for extended messages */ #define pn_submsg_id pn_msg_u.base.pn_submsg_id #define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id #define pn_e_res_id pn_msg_u.ext.pn_e_res_id #define pn_data pn_msg_u.base.pn_data #define pn_e_data pn_msg_u.ext.pn_e_data /* data for unreachable errors */ #define PN_COMM_SERVICE_NOT_IDENTIFIED_RESP 0x01 #define PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP 0x14 #define pn_orig_msg_id pn_data[0] #define pn_status pn_data[1] #define pn_e_orig_msg_id pn_e_data[0] #define pn_e_status pn_e_data[1] /* Phonet socket address structure */ struct sockaddr_pn { __kernel_sa_family_t spn_family; __u8 spn_obj; __u8 spn_dev; __u8 spn_resource; __u8
spn_zero[sizeof(struct sockaddr) - sizeof(__kernel_sa_family_t) - 3]; } __attribute__((packed)); /* Well known address */ #define PN_DEV_PC 0x10 static inline __u16 pn_object(__u8 addr, __u16 port) { return (addr << 8) | (port & 0x3ff); } static inline __u8 pn_obj(__u16 handle) { return handle & 0xff; } static inline __u8 pn_dev(__u16 handle) { return handle >> 8; } static inline __u16 pn_port(__u16 handle) { return handle & 0x3ff; } static inline __u8 pn_addr(__u16 handle) { return (handle >> 8) & 0xfc; } static inline void pn_sockaddr_set_addr(struct sockaddr_pn *spn, __u8 addr) { spn->spn_dev &= 0x03; spn->spn_dev |= addr & 0xfc; } static inline void pn_sockaddr_set_port(struct sockaddr_pn *spn, __u16 port) { spn->spn_dev &= 0xfc; spn->spn_dev |= (port >> 8) & 0x03; spn->spn_obj = port & 0xff; } static inline void pn_sockaddr_set_object(struct sockaddr_pn *spn, __u16 handle) { spn->spn_dev = pn_dev(handle); spn->spn_obj = pn_obj(handle); } static inline void pn_sockaddr_set_resource(struct sockaddr_pn *spn, __u8 resource) { spn->spn_resource = resource; } static inline __u8 pn_sockaddr_get_addr(const struct sockaddr_pn *spn) { return spn->spn_dev & 0xfc; } static inline __u16 pn_sockaddr_get_port(const struct sockaddr_pn *spn) { return ((spn->spn_dev & 0x03) << 8) | spn->spn_obj; } static inline __u16 pn_sockaddr_get_object(const struct sockaddr_pn *spn) { return pn_object(spn->spn_dev, spn->spn_obj); } static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn) { return spn->spn_resource; } /* Phonet device ioctl requests */ #endif /* _UAPILINUX_PHONET_H */
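/*
 * Illustrative sketch, not part of the UAPI header: how the helpers above
 * pack a Phonet object handle. The device address sits in the high byte and
 * the 10-bit port in the low bits, so the two overlap by two bits; device
 * addresses only use the upper six bits, which is why pn_addr() masks with
 * 0xfc while pn_dev() returns the raw byte. The function name and the port
 * value 0x123 are arbitrary.
 */
static inline int pn_object_roundtrip_example(void)
{
	__u16 handle = pn_object(PN_DEV_PC, 0x123);

	/* Both the (masked) device address and the port survive the packing */
	return pn_addr(handle) == PN_DEV_PC && pn_port(handle) == 0x123;
}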
// SPDX-License-Identifier: GPL-2.0-or-later /* Null security operations. * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <net/af_rxrpc.h> #include "ar-internal.h" static int none_init_connection_security(struct rxrpc_connection *conn, struct rxrpc_key_token *token) { return 0; } /* * Allocate an appropriately sized buffer for the amount of data remaining. */ static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp) { return rxrpc_alloc_data_txbuf(call, umin(remain, RXRPC_JUMBO_DATALEN), 1, gfp); } static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb) { txb->pkt_len = txb->len; if (txb->len == RXRPC_JUMBO_DATALEN) txb->jumboable = true; return 0; } static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); sp->flags |= RXRPC_RX_VERIFIED; return 0; } static void none_free_call_crypto(struct rxrpc_call *call) { } static int none_respond_to_challenge(struct rxrpc_connection *conn, struct sk_buff *skb) { return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, rxrpc_eproto_rxnull_challenge); } static int none_verify_response(struct rxrpc_connection *conn, struct sk_buff *skb) { return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, rxrpc_eproto_rxnull_response); } static void none_clear(struct rxrpc_connection *conn) { } static int none_init(void) { return 0; } static void none_exit(void) { } /* * RxRPC null security - no encryption or integrity protection */ const struct rxrpc_security rxrpc_no_security = { .name = "none", .security_index = RXRPC_SECURITY_NONE, .init = none_init, .exit = none_exit, .init_connection_security = none_init_connection_security, .free_call_crypto = none_free_call_crypto, .alloc_txbuf = none_alloc_txbuf, .secure_packet = none_secure_packet, .verify_packet = none_verify_packet, .respond_to_challenge = none_respond_to_challenge, .verify_response = none_verify_response, .clear = none_clear, };
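/*
 * Illustrative sketch, not part of the file above: callers reach these
 * handlers only through the ops table, so selecting rxrpc_no_security turns
 * every security hook into a cheap no-op or a protocol abort. The helper is
 * hypothetical and assumes the caller already holds the chosen table.
 */
static inline int rxrpc_example_secure_packet(const struct rxrpc_security *sec,
					      struct rxrpc_call *call,
					      struct rxrpc_txbuf *txb)
{
	/* With sec == &rxrpc_no_security this resolves to none_secure_packet() */
	return sec->secure_packet(call, txb);
}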
// SPDX-License-Identifier: GPL-2.0 /* * NTP state machine interfaces and logic. * * This code was mainly moved from kernel/timer.c and kernel/time.c * Please see those files for relevant copyright info and historical * changelogs. */ #include <linux/capability.h> #include <linux/clocksource.h> #include <linux/workqueue.h> #include <linux/hrtimer.h> #include <linux/jiffies.h> #include <linux/math64.h> #include <linux/timex.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/rtc.h> #include <linux/audit.h> #include "ntp_internal.h" #include "timekeeping_internal.h" /** * struct ntp_data - Structure holding all NTP related state * @tick_usec: USER_HZ period in microseconds * @tick_length: Adjusted tick length * @tick_length_base: Base value for @tick_length * @time_state: State of the clock synchronization * @time_status: Clock status bits * @time_offset: Time adjustment in nanoseconds * @time_constant: PLL time constant * @time_maxerror: Maximum error in microseconds holding the NTP sync distance * (NTP dispersion + delay / 2) * @time_esterror: Estimated error in microseconds holding NTP dispersion * @time_freq: Frequency offset scaled nsecs/secs * @time_reftime: Time at last adjustment in seconds * @time_adjust: Adjustment value * @ntp_tick_adj: Constant boot-param configurable NTP tick adjustment (upscaled) * @ntp_next_leap_sec: Second value of the next pending leapsecond, or TIME64_MAX if no leap * * @pps_valid: PPS signal watchdog counter * @pps_tf: PPS phase median filter * @pps_jitter: PPS current jitter in nanoseconds * @pps_fbase: PPS beginning of the last freq interval * @pps_shift: PPS current interval duration in seconds (shift value) * @pps_intcnt: PPS interval counter * @pps_freq: PPS frequency offset in scaled ns/s * @pps_stabil: PPS current stability in scaled ns/s * @pps_calcnt: PPS monitor: calibration intervals * @pps_jitcnt: PPS monitor: jitter limit exceeded * @pps_stbcnt: PPS monitor: stability limit exceeded * @pps_errcnt: PPS monitor: calibration errors * * Protected by the timekeeping locks.
*/ struct ntp_data { unsigned long tick_usec; u64 tick_length; u64 tick_length_base; int time_state; int time_status; s64 time_offset; long time_constant; long time_maxerror; long time_esterror; s64 time_freq; time64_t time_reftime; long time_adjust; s64 ntp_tick_adj; time64_t ntp_next_leap_sec; #ifdef CONFIG_NTP_PPS int pps_valid; long pps_tf[3]; long pps_jitter; struct timespec64 pps_fbase; int pps_shift; int pps_intcnt; s64 pps_freq; long pps_stabil; long pps_calcnt; long pps_jitcnt; long pps_stbcnt; long pps_errcnt; #endif }; static struct ntp_data tk_ntp_data = { .tick_usec = USER_TICK_USEC, .time_state = TIME_OK, .time_status = STA_UNSYNC, .time_constant = 2, .time_maxerror = NTP_PHASE_LIMIT, .time_esterror = NTP_PHASE_LIMIT, .ntp_next_leap_sec = TIME64_MAX, }; #define SECS_PER_DAY 86400 #define MAX_TICKADJ 500LL /* usecs */ #define MAX_TICKADJ_SCALED \ (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ) #define MAX_TAI_OFFSET 100000 #ifdef CONFIG_NTP_PPS /* * The following variables are used when a pulse-per-second (PPS) signal * is available. They establish the engineering parameters of the clock * discipline loop when controlled by the PPS signal. */ #define PPS_VALID 10 /* PPS signal watchdog max (s) */ #define PPS_POPCORN 4 /* popcorn spike threshold (shift) */ #define PPS_INTMIN 2 /* min freq interval (s) (shift) */ #define PPS_INTMAX 8 /* max freq interval (s) (shift) */ #define PPS_INTCOUNT 4 /* number of consecutive good intervals to increase pps_shift or consecutive bad intervals to decrease it */ #define PPS_MAXWANDER 100000 /* max PPS freq wander (ns/s) */ /* * PPS kernel consumer compensates the whole phase error immediately. * Otherwise, reduce the offset by a fixed factor times the time constant. */ static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset) { if (ntpdata->time_status & STA_PPSTIME && ntpdata->time_status & STA_PPSSIGNAL) return offset; else return shift_right(offset, SHIFT_PLL + ntpdata->time_constant); } static inline void pps_reset_freq_interval(struct ntp_data *ntpdata) { /* The PPS calibration interval may end surprisingly early */ ntpdata->pps_shift = PPS_INTMIN; ntpdata->pps_intcnt = 0; } /** * pps_clear - Clears the PPS state variables * @ntpdata: Pointer to ntp data */ static inline void pps_clear(struct ntp_data *ntpdata) { pps_reset_freq_interval(ntpdata); ntpdata->pps_tf[0] = 0; ntpdata->pps_tf[1] = 0; ntpdata->pps_tf[2] = 0; ntpdata->pps_fbase.tv_sec = ntpdata->pps_fbase.tv_nsec = 0; ntpdata->pps_freq = 0; } /* * Decrease pps_valid to indicate that another second has passed since the * last PPS signal. When it reaches 0, indicate that PPS signal is missing. 
*/ static inline void pps_dec_valid(struct ntp_data *ntpdata) { if (ntpdata->pps_valid > 0) { ntpdata->pps_valid--; } else { ntpdata->time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR); pps_clear(ntpdata); } } static inline void pps_set_freq(struct ntp_data *ntpdata) { ntpdata->pps_freq = ntpdata->time_freq; } static inline bool is_error_status(int status) { return (status & (STA_UNSYNC|STA_CLOCKERR)) /* * PPS signal lost when either PPS time or PPS frequency * synchronization requested */ || ((status & (STA_PPSFREQ|STA_PPSTIME)) && !(status & STA_PPSSIGNAL)) /* * PPS jitter exceeded when PPS time synchronization * requested */ || ((status & (STA_PPSTIME|STA_PPSJITTER)) == (STA_PPSTIME|STA_PPSJITTER)) /* * PPS wander exceeded or calibration error when PPS * frequency synchronization requested */ || ((status & STA_PPSFREQ) && (status & (STA_PPSWANDER|STA_PPSERROR))); } static inline void pps_fill_timex(struct ntp_data *ntpdata, struct __kernel_timex *txc) { txc->ppsfreq = shift_right((ntpdata->pps_freq >> PPM_SCALE_INV_SHIFT) * PPM_SCALE_INV, NTP_SCALE_SHIFT); txc->jitter = ntpdata->pps_jitter; if (!(ntpdata->time_status & STA_NANO)) txc->jitter = ntpdata->pps_jitter / NSEC_PER_USEC; txc->shift = ntpdata->pps_shift; txc->stabil = ntpdata->pps_stabil; txc->jitcnt = ntpdata->pps_jitcnt; txc->calcnt = ntpdata->pps_calcnt; txc->errcnt = ntpdata->pps_errcnt; txc->stbcnt = ntpdata->pps_stbcnt; } #else /* !CONFIG_NTP_PPS */ static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset) { return shift_right(offset, SHIFT_PLL + ntpdata->time_constant); } static inline void pps_reset_freq_interval(struct ntp_data *ntpdata) {} static inline void pps_clear(struct ntp_data *ntpdata) {} static inline void pps_dec_valid(struct ntp_data *ntpdata) {} static inline void pps_set_freq(struct ntp_data *ntpdata) {} static inline bool is_error_status(int status) { return status & (STA_UNSYNC|STA_CLOCKERR); } static inline void pps_fill_timex(struct ntp_data *ntpdata, struct __kernel_timex *txc) { /* PPS is not implemented, so these are zero */ txc->ppsfreq = 0; txc->jitter = 0; txc->shift = 0; txc->stabil = 0; txc->jitcnt = 0; txc->calcnt = 0; txc->errcnt = 0; txc->stbcnt = 0; } #endif /* CONFIG_NTP_PPS */ /* * Update tick_length and tick_length_base, based on tick_usec, ntp_tick_adj and * time_freq: */ static void ntp_update_frequency(struct ntp_data *ntpdata) { u64 second_length, new_base, tick_usec = (u64)ntpdata->tick_usec; second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << NTP_SCALE_SHIFT; second_length += ntpdata->ntp_tick_adj; second_length += ntpdata->time_freq; new_base = div_u64(second_length, NTP_INTERVAL_FREQ); /* * Don't wait for the next second_overflow, apply the change to the * tick length immediately: */ ntpdata->tick_length += new_base - ntpdata->tick_length_base; ntpdata->tick_length_base = new_base; } static inline s64 ntp_update_offset_fll(struct ntp_data *ntpdata, s64 offset64, long secs) { ntpdata->time_status &= ~STA_MODE; if (secs < MINSEC) return 0; if (!(ntpdata->time_status & STA_FLL) && (secs <= MAXSEC)) return 0; ntpdata->time_status |= STA_MODE; return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs); } static void ntp_update_offset(struct ntp_data *ntpdata, long offset) { s64 freq_adj, offset64; long secs, real_secs; if (!(ntpdata->time_status & STA_PLL)) return; if (!(ntpdata->time_status & STA_NANO)) { /* Make sure the multiplication below won't overflow */ offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC); offset *= 
NSEC_PER_USEC; } /* Scale the phase adjustment and clamp to the operating range. */ offset = clamp(offset, -MAXPHASE, MAXPHASE); /* * Select how the frequency is to be controlled * and in which mode (PLL or FLL). */ real_secs = __ktime_get_real_seconds(); secs = (long)(real_secs - ntpdata->time_reftime); if (unlikely(ntpdata->time_status & STA_FREQHOLD)) secs = 0; ntpdata->time_reftime = real_secs; offset64 = offset; freq_adj = ntp_update_offset_fll(ntpdata, offset64, secs); /* * Clamp update interval to reduce PLL gain with low * sampling rate (e.g. intermittent network connection) * to avoid instability. */ if (unlikely(secs > 1 << (SHIFT_PLL + 1 + ntpdata->time_constant))) secs = 1 << (SHIFT_PLL + 1 + ntpdata->time_constant); freq_adj += (offset64 * secs) << (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + ntpdata->time_constant)); freq_adj = min(freq_adj + ntpdata->time_freq, MAXFREQ_SCALED); ntpdata->time_freq = max(freq_adj, -MAXFREQ_SCALED); ntpdata->time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ); } static void __ntp_clear(struct ntp_data *ntpdata) { /* Stop active adjtime() */ ntpdata->time_adjust = 0; ntpdata->time_status |= STA_UNSYNC; ntpdata->time_maxerror = NTP_PHASE_LIMIT; ntpdata->time_esterror = NTP_PHASE_LIMIT; ntp_update_frequency(ntpdata); ntpdata->tick_length = ntpdata->tick_length_base; ntpdata->time_offset = 0; ntpdata->ntp_next_leap_sec = TIME64_MAX; /* Clear PPS state variables */ pps_clear(ntpdata); } /** * ntp_clear - Clears the NTP state variables */ void ntp_clear(void) { __ntp_clear(&tk_ntp_data); } u64 ntp_tick_length(void) { return tk_ntp_data.tick_length; } /** * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t * * Provides the time of the next leapsecond against CLOCK_REALTIME in * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending. */ ktime_t ntp_get_next_leap(void) { struct ntp_data *ntpdata = &tk_ntp_data; ktime_t ret; if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS)) return ktime_set(ntpdata->ntp_next_leap_sec, 0); ret = KTIME_MAX; return ret; } /* * This routine handles the overflow of the microsecond field * * The tricky bits of code to handle the accurate clock support * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. * They were originally developed for SUN and DEC kernels. * All the kudos should go to Dave for this stuff. * * Also handles leap second processing, and returns leap offset */ int second_overflow(time64_t secs) { struct ntp_data *ntpdata = &tk_ntp_data; s64 delta; int leap = 0; s32 rem; /* * Leap second processing. If in leap-insert state at the end of the * day, the system clock is set back one second; if in leap-delete * state, the system clock is set ahead one second. 
*/ switch (ntpdata->time_state) { case TIME_OK: if (ntpdata->time_status & STA_INS) { ntpdata->time_state = TIME_INS; div_s64_rem(secs, SECS_PER_DAY, &rem); ntpdata->ntp_next_leap_sec = secs + SECS_PER_DAY - rem; } else if (ntpdata->time_status & STA_DEL) { ntpdata->time_state = TIME_DEL; div_s64_rem(secs + 1, SECS_PER_DAY, &rem); ntpdata->ntp_next_leap_sec = secs + SECS_PER_DAY - rem; } break; case TIME_INS: if (!(ntpdata->time_status & STA_INS)) { ntpdata->ntp_next_leap_sec = TIME64_MAX; ntpdata->time_state = TIME_OK; } else if (secs == ntpdata->ntp_next_leap_sec) { leap = -1; ntpdata->time_state = TIME_OOP; pr_notice("Clock: inserting leap second 23:59:60 UTC\n"); } break; case TIME_DEL: if (!(ntpdata->time_status & STA_DEL)) { ntpdata->ntp_next_leap_sec = TIME64_MAX; ntpdata->time_state = TIME_OK; } else if (secs == ntpdata->ntp_next_leap_sec) { leap = 1; ntpdata->ntp_next_leap_sec = TIME64_MAX; ntpdata->time_state = TIME_WAIT; pr_notice("Clock: deleting leap second 23:59:59 UTC\n"); } break; case TIME_OOP: ntpdata->ntp_next_leap_sec = TIME64_MAX; ntpdata->time_state = TIME_WAIT; break; case TIME_WAIT: if (!(ntpdata->time_status & (STA_INS | STA_DEL))) ntpdata->time_state = TIME_OK; break; } /* Bump the maxerror field */ ntpdata->time_maxerror += MAXFREQ / NSEC_PER_USEC; if (ntpdata->time_maxerror > NTP_PHASE_LIMIT) { ntpdata->time_maxerror = NTP_PHASE_LIMIT; ntpdata->time_status |= STA_UNSYNC; } /* Compute the phase adjustment for the next second */ ntpdata->tick_length = ntpdata->tick_length_base; delta = ntp_offset_chunk(ntpdata, ntpdata->time_offset); ntpdata->time_offset -= delta; ntpdata->tick_length += delta; /* Check PPS signal */ pps_dec_valid(ntpdata); if (!ntpdata->time_adjust) goto out; if (ntpdata->time_adjust > MAX_TICKADJ) { ntpdata->time_adjust -= MAX_TICKADJ; ntpdata->tick_length += MAX_TICKADJ_SCALED; goto out; } if (ntpdata->time_adjust < -MAX_TICKADJ) { ntpdata->time_adjust += MAX_TICKADJ; ntpdata->tick_length -= MAX_TICKADJ_SCALED; goto out; } ntpdata->tick_length += (s64)(ntpdata->time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT; ntpdata->time_adjust = 0; out: return leap; } #if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) static void sync_hw_clock(struct work_struct *work); static DECLARE_WORK(sync_work, sync_hw_clock); static struct hrtimer sync_hrtimer; #define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC) static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer) { queue_work(system_freezable_power_efficient_wq, &sync_work); return HRTIMER_NORESTART; } static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry) { ktime_t exp = ktime_set(ktime_get_real_seconds(), 0); if (retry) exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec); else exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec); hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS); } /* * Check whether @now is correct versus the required time to update the RTC * and calculate the value which needs to be written to the RTC so that the * next seconds increment of the RTC after the write is aligned with the next * seconds increment of clock REALTIME. 
* * tsched t1 write(t2.tv_sec - 1sec)) t2 RTC increments seconds * * t2.tv_nsec == 0 * tsched = t2 - set_offset_nsec * newval = t2 - NSEC_PER_SEC * * ==> neval = tsched + set_offset_nsec - NSEC_PER_SEC * * As the execution of this code is not guaranteed to happen exactly at * tsched this allows it to happen within a fuzzy region: * * abs(now - tsched) < FUZZ * * If @now is not inside the allowed window the function returns false. */ static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec, struct timespec64 *to_set, const struct timespec64 *now) { /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */ const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5; struct timespec64 delay = {.tv_sec = -1, .tv_nsec = set_offset_nsec}; *to_set = timespec64_add(*now, delay); if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) { to_set->tv_nsec = 0; return true; } if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) { to_set->tv_sec++; to_set->tv_nsec = 0; return true; } return false; } #ifdef CONFIG_GENERIC_CMOS_UPDATE int __weak update_persistent_clock64(struct timespec64 now64) { return -ENODEV; } #else static inline int update_persistent_clock64(struct timespec64 now64) { return -ENODEV; } #endif #ifdef CONFIG_RTC_SYSTOHC /* Save NTP synchronized time to the RTC */ static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec) { struct rtc_device *rtc; struct rtc_time tm; int err = -ENODEV; rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE); if (!rtc) return -ENODEV; if (!rtc->ops || !rtc->ops->set_time) goto out_close; /* First call might not have the correct offset */ if (*offset_nsec == rtc->set_offset_nsec) { rtc_time64_to_tm(to_set->tv_sec, &tm); err = rtc_set_time(rtc, &tm); } else { /* Store the update offset and let the caller try again */ *offset_nsec = rtc->set_offset_nsec; err = -EAGAIN; } out_close: rtc_class_close(rtc); return err; } #else static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec) { return -ENODEV; } #endif /** * ntp_synced - Tells whether the NTP status is not UNSYNC * Returns: true if not UNSYNC, false otherwise */ static inline bool ntp_synced(void) { return !(tk_ntp_data.time_status & STA_UNSYNC); } /* * If we have an externally synchronized Linux clock, then update RTC clock * accordingly every ~11 minutes. Generally RTCs can only store second * precision, but many RTCs will adjust the phase of their second tick to * match the moment of update. This infrastructure arranges to call to the RTC * set at the correct moment to phase synchronize the RTC second tick over * with the kernel clock. */ static void sync_hw_clock(struct work_struct *work) { /* * The default synchronization offset is 500ms for the deprecated * update_persistent_clock64() under the assumption that it uses * the infamous CMOS clock (MC146818). */ static unsigned long offset_nsec = NSEC_PER_SEC / 2; struct timespec64 now, to_set; int res = -EAGAIN; /* * Don't update if STA_UNSYNC is set and if ntp_notify_cmos_timer() * managed to schedule the work between the timer firing and the * work being able to rearm the timer. Wait for the timer to expire. */ if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer)) return; ktime_get_real_ts64(&now); /* If @now is not in the allowed window, try again */ if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now)) goto rearm; /* Take timezone adjusted RTCs into account */ if (persistent_clock_is_local) to_set.tv_sec -= (sys_tz.tz_minuteswest * 60); /* Try the legacy RTC first. 
*/ res = update_persistent_clock64(to_set); if (res != -ENODEV) goto rearm; /* Try the RTC class */ res = update_rtc(&to_set, &offset_nsec); if (res == -ENODEV) return; rearm: sched_sync_hw_clock(offset_nsec, res != 0); } void ntp_notify_cmos_timer(bool offset_set) { /* * If the time jumped (using ADJ_SETOFFSET) cancels sync timer, * which may have been running if the time was synchronized * prior to the ADJ_SETOFFSET call. */ if (offset_set) hrtimer_cancel(&sync_hrtimer); /* * When the work is currently executed but has not yet the timer * rearmed this queues the work immediately again. No big issue, * just a pointless work scheduled. */ if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer)) queue_work(system_freezable_power_efficient_wq, &sync_work); } static void __init ntp_init_cmos_sync(void) { hrtimer_setup(&sync_hrtimer, sync_timer_callback, CLOCK_REALTIME, HRTIMER_MODE_ABS); } #else /* CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) */ static inline void __init ntp_init_cmos_sync(void) { } #endif /* !CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC) */ /* * Propagate a new txc->status value into the NTP state: */ static inline void process_adj_status(struct ntp_data *ntpdata, const struct __kernel_timex *txc) { if ((ntpdata->time_status & STA_PLL) && !(txc->status & STA_PLL)) { ntpdata->time_state = TIME_OK; ntpdata->time_status = STA_UNSYNC; ntpdata->ntp_next_leap_sec = TIME64_MAX; /* Restart PPS frequency calibration */ pps_reset_freq_interval(ntpdata); } /* * If we turn on PLL adjustments then reset the * reference time to current time. */ if (!(ntpdata->time_status & STA_PLL) && (txc->status & STA_PLL)) ntpdata->time_reftime = __ktime_get_real_seconds(); /* only set allowed bits */ ntpdata->time_status &= STA_RONLY; ntpdata->time_status |= txc->status & ~STA_RONLY; } static inline void process_adjtimex_modes(struct ntp_data *ntpdata, const struct __kernel_timex *txc, s32 *time_tai) { if (txc->modes & ADJ_STATUS) process_adj_status(ntpdata, txc); if (txc->modes & ADJ_NANO) ntpdata->time_status |= STA_NANO; if (txc->modes & ADJ_MICRO) ntpdata->time_status &= ~STA_NANO; if (txc->modes & ADJ_FREQUENCY) { ntpdata->time_freq = txc->freq * PPM_SCALE; ntpdata->time_freq = min(ntpdata->time_freq, MAXFREQ_SCALED); ntpdata->time_freq = max(ntpdata->time_freq, -MAXFREQ_SCALED); /* Update pps_freq */ pps_set_freq(ntpdata); } if (txc->modes & ADJ_MAXERROR) ntpdata->time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT); if (txc->modes & ADJ_ESTERROR) ntpdata->time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT); if (txc->modes & ADJ_TIMECONST) { ntpdata->time_constant = clamp(txc->constant, 0, MAXTC); if (!(ntpdata->time_status & STA_NANO)) ntpdata->time_constant += 4; ntpdata->time_constant = clamp(ntpdata->time_constant, 0, MAXTC); } if (txc->modes & ADJ_TAI && txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET) *time_tai = txc->constant; if (txc->modes & ADJ_OFFSET) ntp_update_offset(ntpdata, txc->offset); if (txc->modes & ADJ_TICK) ntpdata->tick_usec = txc->tick; if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET)) ntp_update_frequency(ntpdata); } /* * adjtimex() mainly allows reading (and writing, if superuser) of * kernel time-keeping variables. used by xntpd. 
*/ int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts, s32 *time_tai, struct audit_ntp_data *ad) { struct ntp_data *ntpdata = &tk_ntp_data; int result; if (txc->modes & ADJ_ADJTIME) { long save_adjust = ntpdata->time_adjust; if (!(txc->modes & ADJ_OFFSET_READONLY)) { /* adjtime() is independent from ntp_adjtime() */ ntpdata->time_adjust = txc->offset; ntp_update_frequency(ntpdata); audit_ntp_set_old(ad, AUDIT_NTP_ADJUST, save_adjust); audit_ntp_set_new(ad, AUDIT_NTP_ADJUST, ntpdata->time_adjust); } txc->offset = save_adjust; } else { /* If there are input parameters, then process them: */ if (txc->modes) { audit_ntp_set_old(ad, AUDIT_NTP_OFFSET, ntpdata->time_offset); audit_ntp_set_old(ad, AUDIT_NTP_FREQ, ntpdata->time_freq); audit_ntp_set_old(ad, AUDIT_NTP_STATUS, ntpdata->time_status); audit_ntp_set_old(ad, AUDIT_NTP_TAI, *time_tai); audit_ntp_set_old(ad, AUDIT_NTP_TICK, ntpdata->tick_usec); process_adjtimex_modes(ntpdata, txc, time_tai); audit_ntp_set_new(ad, AUDIT_NTP_OFFSET, ntpdata->time_offset); audit_ntp_set_new(ad, AUDIT_NTP_FREQ, ntpdata->time_freq); audit_ntp_set_new(ad, AUDIT_NTP_STATUS, ntpdata->time_status); audit_ntp_set_new(ad, AUDIT_NTP_TAI, *time_tai); audit_ntp_set_new(ad, AUDIT_NTP_TICK, ntpdata->tick_usec); } txc->offset = shift_right(ntpdata->time_offset * NTP_INTERVAL_FREQ, NTP_SCALE_SHIFT); if (!(ntpdata->time_status & STA_NANO)) txc->offset = div_s64(txc->offset, NSEC_PER_USEC); } result = ntpdata->time_state; if (is_error_status(ntpdata->time_status)) result = TIME_ERROR; txc->freq = shift_right((ntpdata->time_freq >> PPM_SCALE_INV_SHIFT) * PPM_SCALE_INV, NTP_SCALE_SHIFT); txc->maxerror = ntpdata->time_maxerror; txc->esterror = ntpdata->time_esterror; txc->status = ntpdata->time_status; txc->constant = ntpdata->time_constant; txc->precision = 1; txc->tolerance = MAXFREQ_SCALED / PPM_SCALE; txc->tick = ntpdata->tick_usec; txc->tai = *time_tai; /* Fill PPS status fields */ pps_fill_timex(ntpdata, txc); txc->time.tv_sec = ts->tv_sec; txc->time.tv_usec = ts->tv_nsec; if (!(ntpdata->time_status & STA_NANO)) txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC; /* Handle leapsec adjustments */ if (unlikely(ts->tv_sec >= ntpdata->ntp_next_leap_sec)) { if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS)) { result = TIME_OOP; txc->tai++; txc->time.tv_sec--; } if ((ntpdata->time_state == TIME_DEL) && (ntpdata->time_status & STA_DEL)) { result = TIME_WAIT; txc->tai--; txc->time.tv_sec++; } if ((ntpdata->time_state == TIME_OOP) && (ts->tv_sec == ntpdata->ntp_next_leap_sec)) result = TIME_WAIT; } return result; } #ifdef CONFIG_NTP_PPS /* * struct pps_normtime is basically a struct timespec, but it is * semantically different (and it is the reason why it was invented): * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */ struct pps_normtime { s64 sec; /* seconds */ long nsec; /* nanoseconds */ }; /* * Normalize the timestamp so that nsec is in the * [ -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */ static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts) { struct pps_normtime norm = { .sec = ts.tv_sec, .nsec = ts.tv_nsec }; if (norm.nsec > (NSEC_PER_SEC >> 1)) { norm.nsec -= NSEC_PER_SEC; norm.sec++; } return norm; } /* Get current phase correction and jitter */ static inline long pps_phase_filter_get(struct ntp_data *ntpdata, long *jitter) { *jitter = ntpdata->pps_tf[0] - ntpdata->pps_tf[1]; if (*jitter < 0) *jitter = -*jitter; /* TODO: 
test various filters */ return ntpdata->pps_tf[0]; } /* Add the sample to the phase filter */ static inline void pps_phase_filter_add(struct ntp_data *ntpdata, long err) { ntpdata->pps_tf[2] = ntpdata->pps_tf[1]; ntpdata->pps_tf[1] = ntpdata->pps_tf[0]; ntpdata->pps_tf[0] = err; } /* * Decrease frequency calibration interval length. It is halved after four * consecutive unstable intervals. */ static inline void pps_dec_freq_interval(struct ntp_data *ntpdata) { if (--ntpdata->pps_intcnt <= -PPS_INTCOUNT) { ntpdata->pps_intcnt = -PPS_INTCOUNT; if (ntpdata->pps_shift > PPS_INTMIN) { ntpdata->pps_shift--; ntpdata->pps_intcnt = 0; } } } /* * Increase frequency calibration interval length. It is doubled after * four consecutive stable intervals. */ static inline void pps_inc_freq_interval(struct ntp_data *ntpdata) { if (++ntpdata->pps_intcnt >= PPS_INTCOUNT) { ntpdata->pps_intcnt = PPS_INTCOUNT; if (ntpdata->pps_shift < PPS_INTMAX) { ntpdata->pps_shift++; ntpdata->pps_intcnt = 0; } } } /* * Update clock frequency based on MONOTONIC_RAW clock PPS signal * timestamps * * At the end of the calibration interval the difference between the * first and last MONOTONIC_RAW clock timestamps divided by the length * of the interval becomes the frequency update. If the interval was * too long, the data are discarded. * Returns the difference between old and new frequency values. */ static long hardpps_update_freq(struct ntp_data *ntpdata, struct pps_normtime freq_norm) { long delta, delta_mod; s64 ftemp; /* Check if the frequency interval was too long */ if (freq_norm.sec > (2 << ntpdata->pps_shift)) { ntpdata->time_status |= STA_PPSERROR; ntpdata->pps_errcnt++; pps_dec_freq_interval(ntpdata); printk_deferred(KERN_ERR "hardpps: PPSERROR: interval too long - %lld s\n", freq_norm.sec); return 0; } /* * Here the raw frequency offset and wander (stability) is * calculated. If the wander is less than the wander threshold the * interval is increased; otherwise it is decreased. */ ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT, freq_norm.sec); delta = shift_right(ftemp - ntpdata->pps_freq, NTP_SCALE_SHIFT); ntpdata->pps_freq = ftemp; if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) { printk_deferred(KERN_WARNING "hardpps: PPSWANDER: change=%ld\n", delta); ntpdata->time_status |= STA_PPSWANDER; ntpdata->pps_stbcnt++; pps_dec_freq_interval(ntpdata); } else { /* Good sample */ pps_inc_freq_interval(ntpdata); } /* * The stability metric is calculated as the average of recent * frequency changes, but is used only for performance monitoring */ delta_mod = delta; if (delta_mod < 0) delta_mod = -delta_mod; ntpdata->pps_stabil += (div_s64(((s64)delta_mod) << (NTP_SCALE_SHIFT - SHIFT_USEC), NSEC_PER_USEC) - ntpdata->pps_stabil) >> PPS_INTMIN; /* If enabled, the system clock frequency is updated */ if ((ntpdata->time_status & STA_PPSFREQ) && !(ntpdata->time_status & STA_FREQHOLD)) { ntpdata->time_freq = ntpdata->pps_freq; ntp_update_frequency(ntpdata); } return delta; } /* Correct REALTIME clock phase error against PPS signal */ static void hardpps_update_phase(struct ntp_data *ntpdata, long error) { long correction = -error; long jitter; /* Add the sample to the median filter */ pps_phase_filter_add(ntpdata, correction); correction = pps_phase_filter_get(ntpdata, &jitter); /* * Nominal jitter is due to PPS signal noise. If it exceeds the * threshold, the sample is discarded; otherwise, if so enabled, * the time offset is updated. 
*/ if (jitter > (ntpdata->pps_jitter << PPS_POPCORN)) { printk_deferred(KERN_WARNING "hardpps: PPSJITTER: jitter=%ld, limit=%ld\n", jitter, (ntpdata->pps_jitter << PPS_POPCORN)); ntpdata->time_status |= STA_PPSJITTER; ntpdata->pps_jitcnt++; } else if (ntpdata->time_status & STA_PPSTIME) { /* Correct the time using the phase offset */ ntpdata->time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ); /* Cancel running adjtime() */ ntpdata->time_adjust = 0; } /* Update jitter */ ntpdata->pps_jitter += (jitter - ntpdata->pps_jitter) >> PPS_INTMIN; } /* * __hardpps() - discipline CPU clock oscillator to external PPS signal * * This routine is called at each PPS signal arrival in order to * discipline the CPU clock oscillator to the PPS signal. It takes two * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former * is used to correct clock phase error and the latter is used to * correct the frequency. * * This code is based on David Mills's reference nanokernel * implementation. It was mostly rewritten but keeps the same idea. */ void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts) { struct pps_normtime pts_norm, freq_norm; struct ntp_data *ntpdata = &tk_ntp_data; pts_norm = pps_normalize_ts(*phase_ts); /* Clear the error bits, they will be set again if needed */ ntpdata->time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR); /* indicate signal presence */ ntpdata->time_status |= STA_PPSSIGNAL; ntpdata->pps_valid = PPS_VALID; /* * When called for the first time, just start the frequency * interval */ if (unlikely(ntpdata->pps_fbase.tv_sec == 0)) { ntpdata->pps_fbase = *raw_ts; return; } /* Ok, now we have a base for frequency calculation */ freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, ntpdata->pps_fbase)); /* * Check that the signal is in the range * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */ if ((freq_norm.sec == 0) || (freq_norm.nsec > MAXFREQ * freq_norm.sec) || (freq_norm.nsec < -MAXFREQ * freq_norm.sec)) { ntpdata->time_status |= STA_PPSJITTER; /* Restart the frequency calibration interval */ ntpdata->pps_fbase = *raw_ts; printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n"); return; } /* Signal is ok. Check if the current frequency interval is finished */ if (freq_norm.sec >= (1 << ntpdata->pps_shift)) { ntpdata->pps_calcnt++; /* Restart the frequency calibration interval */ ntpdata->pps_fbase = *raw_ts; hardpps_update_freq(ntpdata, freq_norm); } hardpps_update_phase(ntpdata, pts_norm.nsec); } #endif /* CONFIG_NTP_PPS */ static int __init ntp_tick_adj_setup(char *str) { int rc = kstrtos64(str, 0, &tk_ntp_data.ntp_tick_adj); if (rc) return rc; tk_ntp_data.ntp_tick_adj <<= NTP_SCALE_SHIFT; return 1; } __setup("ntp_tick_adj=", ntp_tick_adj_setup); void __init ntp_init(void) { ntp_clear(); ntp_init_cmos_sync(); }
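The adjtimex state machine above is normally driven from user space through the adjtimex()/clock_adjtime() system calls, which end up in __do_adjtimex(). As a rough illustration of the read-only path (txc->modes == 0), a minimal userspace sketch could look like the following; the freq-to-ppm conversion assumes the usual 2^-16 scaling of the timex interface:

/* Minimal userspace sketch: query NTP state via adjtimex(2) with modes = 0. */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* no modification, read-only query */
	int state = adjtimex(&tx);		/* returns TIME_OK, TIME_ERROR, ... */

	if (state < 0) {
		perror("adjtimex");
		return 1;
	}

	printf("clock state: %d %s\n", state,
	       (tx.status & STA_UNSYNC) ? "(unsynchronized)" : "(synchronized)");
	/* offset is in microseconds, or nanoseconds if STA_NANO is set */
	printf("offset=%ld freq=%.3f ppm maxerror=%ld us\n",
	       tx.offset, tx.freq / 65536.0, tx.maxerror);
	return 0;
}

On a machine disciplined by ntpd or chronyd this typically reports TIME_OK with STA_UNSYNC clear; on an unsynchronized box the call returns TIME_ERROR, because is_error_status() above sees STA_UNSYNC set.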
3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2008 by Karsten Keil <kkeil@novell.com> */ #include <linux/slab.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/mISDNif.h> #include "core.h" static u_int debug; MODULE_AUTHOR("Karsten Keil"); MODULE_DESCRIPTION("Modular ISDN core driver"); MODULE_LICENSE("GPL"); module_param(debug, uint, S_IRUGO | S_IWUSR); static u64 device_ids; #define MAX_DEVICE_ID 63 static LIST_HEAD(Bprotocols); static DEFINE_RWLOCK(bp_lock); static void mISDN_dev_release(struct device *dev) { /* nothing to do: the device is part of its parent's data structure */ } static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->id); } static DEVICE_ATTR_RO(id); static ssize_t nrbchan_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->nrbchan); } static DEVICE_ATTR_RO(nrbchan); static ssize_t d_protocols_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->Dprotocols); } static DEVICE_ATTR_RO(d_protocols); static ssize_t b_protocols_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols()); } static DEVICE_ATTR_RO(b_protocols); static ssize_t protocol_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return -ENODEV; return sprintf(buf, "%d\n", mdev->D.protocol); } static DEVICE_ATTR_RO(protocol); static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { strcpy(buf, dev_name(dev)); return 
strlen(buf); } static DEVICE_ATTR_RO(name); #if 0 /* hangs */ static ssize_t name_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int err = 0; char *out = kmalloc(count + 1, GFP_KERNEL); if (!out) return -ENOMEM; memcpy(out, buf, count); if (count && out[count - 1] == '\n') out[--count] = 0; if (count) err = device_rename(dev, out); kfree(out); return (err < 0) ? err : count; } static DEVICE_ATTR_RW(name); #endif static ssize_t channelmap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mISDNdevice *mdev = dev_to_mISDN(dev); char *bp = buf; int i; for (i = 0; i <= mdev->nrbchan; i++) *bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0'; return bp - buf; } static DEVICE_ATTR_RO(channelmap); static struct attribute *mISDN_attrs[] = { &dev_attr_id.attr, &dev_attr_d_protocols.attr, &dev_attr_b_protocols.attr, &dev_attr_protocol.attr, &dev_attr_channelmap.attr, &dev_attr_nrbchan.attr, &dev_attr_name.attr, NULL, }; ATTRIBUTE_GROUPS(mISDN); static int mISDN_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return 0; if (add_uevent_var(env, "nchans=%d", mdev->nrbchan)) return -ENOMEM; return 0; } static struct class mISDN_class = { .name = "mISDN", .dev_uevent = mISDN_uevent, .dev_groups = mISDN_groups, .dev_release = mISDN_dev_release, }; static int _get_mdevice(struct device *dev, const void *id) { struct mISDNdevice *mdev = dev_to_mISDN(dev); if (!mdev) return 0; if (mdev->id != *(const u_int *)id) return 0; return 1; } struct mISDNdevice *get_mdevice(u_int id) { return dev_to_mISDN(class_find_device(&mISDN_class, NULL, &id, _get_mdevice)); } static int _get_mdevice_count(struct device *dev, void *cnt) { *(int *)cnt += 1; return 0; } int get_mdevice_count(void) { int cnt = 0; class_for_each_device(&mISDN_class, NULL, &cnt, _get_mdevice_count); return cnt; } static int get_free_devid(void) { u_int i; for (i = 0; i <= MAX_DEVICE_ID; i++) if (!test_and_set_bit(i, (u_long *)&device_ids)) break; if (i > MAX_DEVICE_ID) return -EBUSY; return i; } int mISDN_register_device(struct mISDNdevice *dev, struct device *parent, char *name) { int err; err = get_free_devid(); if (err < 0) return err; dev->id = err; device_initialize(&dev->dev); if (name && name[0]) dev_set_name(&dev->dev, "%s", name); else dev_set_name(&dev->dev, "mISDN%d", dev->id); if (debug & DEBUG_CORE) printk(KERN_DEBUG "mISDN_register %s %d\n", dev_name(&dev->dev), dev->id); dev->dev.class = &mISDN_class; err = create_stack(dev); if (err) goto error1; dev->dev.platform_data = dev; dev->dev.parent = parent; dev_set_drvdata(&dev->dev, dev); err = device_add(&dev->dev); if (err) goto error3; return 0; error3: delete_stack(dev); error1: put_device(&dev->dev); return err; } EXPORT_SYMBOL(mISDN_register_device); void mISDN_unregister_device(struct mISDNdevice *dev) { if (debug & DEBUG_CORE) printk(KERN_DEBUG "mISDN_unregister %s %d\n", dev_name(&dev->dev), dev->id); /* sysfs_remove_link(&dev->dev.kobj, "device"); */ device_del(&dev->dev); dev_set_drvdata(&dev->dev, NULL); test_and_clear_bit(dev->id, (u_long *)&device_ids); delete_stack(dev); put_device(&dev->dev); } EXPORT_SYMBOL(mISDN_unregister_device); u_int get_all_Bprotocols(void) { struct Bprotocol *bp; u_int m = 0; read_lock(&bp_lock); list_for_each_entry(bp, &Bprotocols, list) m |= bp->Bprotocols; read_unlock(&bp_lock); return m; } struct Bprotocol * get_Bprotocol4mask(u_int m) { struct Bprotocol *bp; read_lock(&bp_lock); 
list_for_each_entry(bp, &Bprotocols, list) if (bp->Bprotocols & m) { read_unlock(&bp_lock); return bp; } read_unlock(&bp_lock); return NULL; } int mISDN_register_Bprotocol(struct Bprotocol *bp) { u_long flags; struct Bprotocol *old; if (debug & DEBUG_CORE) printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name, bp->Bprotocols); old = get_Bprotocol4mask(bp->Bprotocols); if (old) { printk(KERN_WARNING "register duplicate protocol old %s/%x new %s/%x\n", old->name, old->Bprotocols, bp->name, bp->Bprotocols); return -EBUSY; } write_lock_irqsave(&bp_lock, flags); list_add_tail(&bp->list, &Bprotocols); write_unlock_irqrestore(&bp_lock, flags); return 0; } EXPORT_SYMBOL(mISDN_register_Bprotocol); void mISDN_unregister_Bprotocol(struct Bprotocol *bp) { u_long flags; if (debug & DEBUG_CORE) printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name, bp->Bprotocols); write_lock_irqsave(&bp_lock, flags); list_del(&bp->list); write_unlock_irqrestore(&bp_lock, flags); } EXPORT_SYMBOL(mISDN_unregister_Bprotocol); static const char *msg_no_channel = "<no channel>"; static const char *msg_no_stack = "<no stack>"; static const char *msg_no_stackdev = "<no stack device>"; const char *mISDNDevName4ch(struct mISDNchannel *ch) { if (!ch) return msg_no_channel; if (!ch->st) return msg_no_stack; if (!ch->st->dev) return msg_no_stackdev; return dev_name(&ch->st->dev->dev); }; EXPORT_SYMBOL(mISDNDevName4ch); static int mISDNInit(void) { int err; printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n", MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE); mISDN_init_clock(&debug); mISDN_initstack(&debug); err = class_register(&mISDN_class); if (err) goto error1; err = mISDN_inittimer(&debug); if (err) goto error2; err = Isdnl1_Init(&debug); if (err) goto error3; err = Isdnl2_Init(&debug); if (err) goto error4; err = misdn_sock_init(&debug); if (err) goto error5; return 0; error5: Isdnl2_cleanup(); error4: Isdnl1_cleanup(); error3: mISDN_timer_cleanup(); error2: class_unregister(&mISDN_class); error1: return err; } static void mISDN_cleanup(void) { misdn_sock_cleanup(); Isdnl2_cleanup(); Isdnl1_cleanup(); mISDN_timer_cleanup(); class_unregister(&mISDN_class); printk(KERN_DEBUG "mISDNcore unloaded\n"); } module_init(mISDNInit); module_exit(mISDN_cleanup);
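mISDN_register_device() above attaches each device to the "mISDN" class, so the DEVICE_ATTR_RO() attributes become readable files in sysfs. A hypothetical userspace sketch follows; the /sys/class/mISDN/mISDN0/ path and device name are assumptions based on the dev_set_name() fallback, while the attribute names are taken directly from the attribute array:

/* Hypothetical sketch: dump the sysfs attributes of the first mISDN device. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *attrs[] = { "id", "nrbchan", "d_protocols",
				"b_protocols", "protocol", "channelmap" };
	char path[128], buf[64];

	for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		snprintf(path, sizeof(path), "/sys/class/mISDN/mISDN0/%s", attrs[i]);
		FILE *f = fopen(path, "r");
		if (!f)
			continue;	/* device or attribute not present */
		if (fgets(buf, sizeof(buf), f)) {
			buf[strcspn(buf, "\n")] = '\0';
			printf("%-12s %s\n", attrs[i], buf);
		}
		fclose(f);
	}
	return 0;
}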
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/module.h>

/*
 * n_null.c - Null line discipline used in the failure path
 *
 * Copyright (C) Intel 2017
 */

static ssize_t n_null_read(struct tty_struct *tty, struct file *file, u8 *buf,
			   size_t nr, void **cookie, unsigned long offset)
{
	return -EOPNOTSUPP;
}

static ssize_t n_null_write(struct tty_struct *tty, struct file *file,
			    const u8 *buf, size_t nr)
{
	return -EOPNOTSUPP;
}

static struct tty_ldisc_ops null_ldisc = {
	.owner	= THIS_MODULE,
	.num	= N_NULL,
	.name	= "n_null",
	.read	= n_null_read,
	.write	= n_null_write,
};

static int __init n_null_init(void)
{
	BUG_ON(tty_register_ldisc(&null_ldisc));
	return 0;
}

static void __exit n_null_exit(void)
{
	tty_unregister_ldisc(&null_ldisc);
}

module_init(n_null_init);
module_exit(n_null_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alan Cox");
MODULE_ALIAS_LDISC(N_NULL);
MODULE_DESCRIPTION("Null ldisc driver");
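For completeness, here is a hypothetical userspace sketch of attaching this ldisc to a tty with the TIOCSETD ioctl; "/dev/ttyS0" is only a placeholder, N_NULL is expected from the uapi <linux/tty.h> header, and changing the discipline may require owning the tty or elevated privileges. Once attached, the read/write hooks above make I/O fail with EOPNOTSUPP:

/* Hypothetical sketch: select the null line discipline on a tty. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tty.h>

int main(void)
{
	int ldisc = N_NULL;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* placeholder device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCSETD, &ldisc) < 0)
		perror("TIOCSETD");

	/* With n_null attached, reads and writes fail with EOPNOTSUPP. */
	char c;
	if (read(fd, &c, 1) < 0)
		perror("read");

	close(fd);
	return 0;
}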
898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 // SPDX-License-Identifier: GPL-2.0-only #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/delay.h> #include <linux/hpet.h> #include <linux/cpu.h> #include <linux/irq.h> #include <asm/cpuid.h> #include <asm/irq_remapping.h> #include <asm/hpet.h> #include <asm/time.h> #include <asm/mwait.h> #undef pr_fmt #define pr_fmt(fmt) "hpet: " fmt enum hpet_mode { HPET_MODE_UNUSED, HPET_MODE_LEGACY, HPET_MODE_CLOCKEVT, HPET_MODE_DEVICE, }; struct hpet_channel { struct clock_event_device evt; unsigned int num; unsigned int cpu; unsigned int irq; unsigned int in_use; enum hpet_mode mode; unsigned int boot_cfg; char name[10]; }; struct hpet_base { unsigned int nr_channels; unsigned int nr_clockevents; unsigned int boot_cfg; struct hpet_channel *channels; }; #define HPET_MASK CLOCKSOURCE_MASK(32) #define 
HPET_MIN_CYCLES 128 #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) /* * HPET address is set in acpi/boot.c, when an ACPI entry exists */ unsigned long hpet_address; u8 hpet_blockid; /* OS timer block num */ bool hpet_msi_disable; #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ) static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel); static struct irq_domain *hpet_domain; #endif static void __iomem *hpet_virt_address; static struct hpet_base hpet_base; static bool hpet_legacy_int_enabled; static unsigned long hpet_freq; bool boot_hpet_disable; bool hpet_force_user; static bool hpet_verbose; static inline struct hpet_channel *clockevent_to_channel(struct clock_event_device *evt) { return container_of(evt, struct hpet_channel, evt); } inline unsigned int hpet_readl(unsigned int a) { return readl(hpet_virt_address + a); } static inline void hpet_writel(unsigned int d, unsigned int a) { writel(d, hpet_virt_address + a); } static inline void hpet_set_mapping(void) { hpet_virt_address = ioremap(hpet_address, HPET_MMAP_SIZE); } static inline void hpet_clear_mapping(void) { iounmap(hpet_virt_address); hpet_virt_address = NULL; } /* * HPET command line enable / disable */ static int __init hpet_setup(char *str) { while (str) { char *next = strchr(str, ','); if (next) *next++ = 0; if (!strncmp("disable", str, 7)) boot_hpet_disable = true; if (!strncmp("force", str, 5)) hpet_force_user = true; if (!strncmp("verbose", str, 7)) hpet_verbose = true; str = next; } return 1; } __setup("hpet=", hpet_setup); static int __init disable_hpet(char *str) { boot_hpet_disable = true; return 1; } __setup("nohpet", disable_hpet); static inline int is_hpet_capable(void) { return !boot_hpet_disable && hpet_address; } /** * is_hpet_enabled - Check whether the legacy HPET timer interrupt is enabled */ int is_hpet_enabled(void) { return is_hpet_capable() && hpet_legacy_int_enabled; } EXPORT_SYMBOL_GPL(is_hpet_enabled); static void _hpet_print_config(const char *function, int line) { u32 i, id, period, cfg, status, channels, l, h; pr_info("%s(%d):\n", function, line); id = hpet_readl(HPET_ID); period = hpet_readl(HPET_PERIOD); pr_info("ID: 0x%x, PERIOD: 0x%x\n", id, period); cfg = hpet_readl(HPET_CFG); status = hpet_readl(HPET_STATUS); pr_info("CFG: 0x%x, STATUS: 0x%x\n", cfg, status); l = hpet_readl(HPET_COUNTER); h = hpet_readl(HPET_COUNTER+4); pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h); channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; for (i = 0; i < channels; i++) { l = hpet_readl(HPET_Tn_CFG(i)); h = hpet_readl(HPET_Tn_CFG(i)+4); pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h); l = hpet_readl(HPET_Tn_CMP(i)); h = hpet_readl(HPET_Tn_CMP(i)+4); pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h); l = hpet_readl(HPET_Tn_ROUTE(i)); h = hpet_readl(HPET_Tn_ROUTE(i)+4); pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h); } } #define hpet_print_config() \ do { \ if (hpet_verbose) \ _hpet_print_config(__func__, __LINE__); \ } while (0) /* * When the HPET driver (/dev/hpet) is enabled, we need to reserve * timer 0 and timer 1 in case of RTC emulation. */ #ifdef CONFIG_HPET static void __init hpet_reserve_platform_timers(void) { struct hpet_data hd; unsigned int i; memset(&hd, 0, sizeof(hd)); hd.hd_phys_address = hpet_address; hd.hd_address = hpet_virt_address; hd.hd_nirqs = hpet_base.nr_channels; /* * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254 * is wrong for i8259!) not the output IRQ. 
Many BIOS writers * don't bother configuring *any* comparator interrupts. */ hd.hd_irq[0] = HPET_LEGACY_8254; hd.hd_irq[1] = HPET_LEGACY_RTC; for (i = 0; i < hpet_base.nr_channels; i++) { struct hpet_channel *hc = hpet_base.channels + i; if (i >= 2) hd.hd_irq[i] = hc->irq; switch (hc->mode) { case HPET_MODE_UNUSED: case HPET_MODE_DEVICE: hc->mode = HPET_MODE_DEVICE; break; case HPET_MODE_CLOCKEVT: case HPET_MODE_LEGACY: hpet_reserve_timer(&hd, hc->num); break; } } hpet_alloc(&hd); } static void __init hpet_select_device_channel(void) { int i; for (i = 0; i < hpet_base.nr_channels; i++) { struct hpet_channel *hc = hpet_base.channels + i; /* Associate the first unused channel to /dev/hpet */ if (hc->mode == HPET_MODE_UNUSED) { hc->mode = HPET_MODE_DEVICE; return; } } } #else static inline void hpet_reserve_platform_timers(void) { } static inline void hpet_select_device_channel(void) {} #endif /* Common HPET functions */ static void hpet_stop_counter(void) { u32 cfg = hpet_readl(HPET_CFG); cfg &= ~HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); } static void hpet_reset_counter(void) { hpet_writel(0, HPET_COUNTER); hpet_writel(0, HPET_COUNTER + 4); } static void hpet_start_counter(void) { unsigned int cfg = hpet_readl(HPET_CFG); cfg |= HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); } static void hpet_restart_counter(void) { hpet_stop_counter(); hpet_reset_counter(); hpet_start_counter(); } static void hpet_resume_device(void) { force_hpet_resume(); } static void hpet_resume_counter(struct clocksource *cs) { hpet_resume_device(); hpet_restart_counter(); } static void hpet_enable_legacy_int(void) { unsigned int cfg = hpet_readl(HPET_CFG); cfg |= HPET_CFG_LEGACY; hpet_writel(cfg, HPET_CFG); hpet_legacy_int_enabled = true; } static int hpet_clkevt_set_state_periodic(struct clock_event_device *evt) { unsigned int channel = clockevent_to_channel(evt)->num; unsigned int cfg, cmp, now; uint64_t delta; hpet_stop_counter(); delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult; delta >>= evt->shift; now = hpet_readl(HPET_COUNTER); cmp = now + (unsigned int)delta; cfg = hpet_readl(HPET_Tn_CFG(channel)); cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL | HPET_TN_32BIT; hpet_writel(cfg, HPET_Tn_CFG(channel)); hpet_writel(cmp, HPET_Tn_CMP(channel)); udelay(1); /* * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL * bit is automatically cleared after the first write. 
* (See AMD-8111 HyperTransport I/O Hub Data Sheet, * Publication # 24674) */ hpet_writel((unsigned int)delta, HPET_Tn_CMP(channel)); hpet_start_counter(); hpet_print_config(); return 0; } static int hpet_clkevt_set_state_oneshot(struct clock_event_device *evt) { unsigned int channel = clockevent_to_channel(evt)->num; unsigned int cfg; cfg = hpet_readl(HPET_Tn_CFG(channel)); cfg &= ~HPET_TN_PERIODIC; cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; hpet_writel(cfg, HPET_Tn_CFG(channel)); return 0; } static int hpet_clkevt_set_state_shutdown(struct clock_event_device *evt) { unsigned int channel = clockevent_to_channel(evt)->num; unsigned int cfg; cfg = hpet_readl(HPET_Tn_CFG(channel)); cfg &= ~HPET_TN_ENABLE; hpet_writel(cfg, HPET_Tn_CFG(channel)); return 0; } static int hpet_clkevt_legacy_resume(struct clock_event_device *evt) { hpet_enable_legacy_int(); hpet_print_config(); return 0; } static int hpet_clkevt_set_next_event(unsigned long delta, struct clock_event_device *evt) { unsigned int channel = clockevent_to_channel(evt)->num; u32 cnt; s32 res; cnt = hpet_readl(HPET_COUNTER); cnt += (u32) delta; hpet_writel(cnt, HPET_Tn_CMP(channel)); /* * HPETs are a complete disaster. The compare register is * based on a equal comparison and neither provides a less * than or equal functionality (which would require to take * the wraparound into account) nor a simple count down event * mode. Further the write to the comparator register is * delayed internally up to two HPET clock cycles in certain * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even * longer delays. We worked around that by reading back the * compare register, but that required another workaround for * ICH9,10 chips where the first readout after write can * return the old stale value. We already had a minimum * programming delta of 5us enforced, but a NMI or SMI hitting * between the counter readout and the comparator write can * move us behind that point easily. Now instead of reading * the compare register back several times, we make the ETIME * decision based on the following: Return ETIME if the * counter value after the write is less than HPET_MIN_CYCLES * away from the event or if the counter is already ahead of * the event. The minimum programming delta for the generic * clockevents code is set to 1.5 * HPET_MIN_CYCLES. */ res = (s32)(cnt - hpet_readl(HPET_COUNTER)); return res < HPET_MIN_CYCLES ? -ETIME : 0; } static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating) { struct clock_event_device *evt = &hc->evt; evt->rating = rating; evt->irq = hc->irq; evt->name = hc->name; evt->cpumask = cpumask_of(hc->cpu); evt->set_state_oneshot = hpet_clkevt_set_state_oneshot; evt->set_next_event = hpet_clkevt_set_next_event; evt->set_state_shutdown = hpet_clkevt_set_state_shutdown; evt->features = CLOCK_EVT_FEAT_ONESHOT; if (hc->boot_cfg & HPET_TN_PERIODIC) { evt->features |= CLOCK_EVT_FEAT_PERIODIC; evt->set_state_periodic = hpet_clkevt_set_state_periodic; } } static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc) { /* * Start HPET with the boot CPU's cpumask and make it global after * the IO_APIC has been initialized. */ hc->cpu = boot_cpu_data.cpu_index; strscpy(hc->name, "hpet", sizeof(hc->name)); hpet_init_clockevent(hc, 50); hc->evt.tick_resume = hpet_clkevt_legacy_resume; /* * Legacy horrors and sins from the past. HPET used periodic mode * unconditionally forever on the legacy channel 0. 
Removing the * below hack and using the conditional in hpet_init_clockevent() * makes at least Qemu and one hardware machine fail to boot. * There are two issues which cause the boot failure: * * #1 After the timer delivery test in IOAPIC and the IOAPIC setup * the next interrupt is not delivered despite the HPET channel * being programmed correctly. Reprogramming the HPET after * switching to IOAPIC makes it work again. After fixing this, * the next issue surfaces: * * #2 Due to the unconditional periodic mode availability the Local * APIC timer calibration can hijack the global clockevents * event handler without causing damage. Using oneshot at this * stage makes if hang because the HPET does not get * reprogrammed due to the handler hijacking. Duh, stupid me! * * Both issues require major surgery and especially the kick HPET * again after enabling IOAPIC results in really nasty hackery. * This 'assume periodic works' magic has survived since HPET * support got added, so it's questionable whether this should be * fixed. Both Qemu and the failing hardware machine support * periodic mode despite the fact that both don't advertise it in * the configuration register and both need that extra kick after * switching to IOAPIC. Seems to be a feature... */ hc->evt.features |= CLOCK_EVT_FEAT_PERIODIC; hc->evt.set_state_periodic = hpet_clkevt_set_state_periodic; /* Start HPET legacy interrupts */ hpet_enable_legacy_int(); clockevents_config_and_register(&hc->evt, hpet_freq, HPET_MIN_PROG_DELTA, 0x7FFFFFFF); global_clock_event = &hc->evt; pr_debug("Clockevent registered\n"); } /* * HPET MSI Support */ #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_GENERIC_MSI_IRQ) static void hpet_msi_unmask(struct irq_data *data) { struct hpet_channel *hc = irq_data_get_irq_handler_data(data); unsigned int cfg; cfg = hpet_readl(HPET_Tn_CFG(hc->num)); cfg |= HPET_TN_ENABLE | HPET_TN_FSB; hpet_writel(cfg, HPET_Tn_CFG(hc->num)); } static void hpet_msi_mask(struct irq_data *data) { struct hpet_channel *hc = irq_data_get_irq_handler_data(data); unsigned int cfg; cfg = hpet_readl(HPET_Tn_CFG(hc->num)); cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB); hpet_writel(cfg, HPET_Tn_CFG(hc->num)); } static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg) { hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num)); hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4); } static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg) { hpet_msi_write(irq_data_get_irq_handler_data(data), msg); } static struct irq_chip hpet_msi_controller __ro_after_init = { .name = "HPET-MSI", .irq_unmask = hpet_msi_unmask, .irq_mask = hpet_msi_mask, .irq_ack = irq_chip_ack_parent, .irq_set_affinity = msi_domain_set_affinity, .irq_retrigger = irq_chip_retrigger_hierarchy, .irq_write_msi_msg = hpet_msi_write_msg, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, }; static int hpet_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL, handle_edge_irq, arg->data, "edge"); return 0; } static struct msi_domain_ops hpet_msi_domain_ops = { .msi_init = hpet_msi_init, }; static struct msi_domain_info hpet_msi_domain_info = { .ops = &hpet_msi_domain_ops, .chip = &hpet_msi_controller, .flags = MSI_FLAG_USE_DEF_DOM_OPS, }; static struct irq_domain *hpet_create_irq_domain(int hpet_id) { struct msi_domain_info *domain_info; struct irq_domain *parent, *d; struct fwnode_handle *fn; struct 
irq_fwspec fwspec; if (x86_vector_domain == NULL) return NULL; domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL); if (!domain_info) return NULL; *domain_info = hpet_msi_domain_info; domain_info->data = (void *)(long)hpet_id; fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name, hpet_id); if (!fn) { kfree(domain_info); return NULL; } fwspec.fwnode = fn; fwspec.param_count = 1; fwspec.param[0] = hpet_id; parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_GENERIC_MSI); if (!parent) { irq_domain_free_fwnode(fn); kfree(domain_info); return NULL; } if (parent != x86_vector_domain) hpet_msi_controller.name = "IR-HPET-MSI"; d = msi_create_irq_domain(fn, domain_info, parent); if (!d) { irq_domain_free_fwnode(fn); kfree(domain_info); } return d; } static inline int hpet_dev_id(struct irq_domain *domain) { struct msi_domain_info *info = msi_get_domain_info(domain); return (int)(long)info->data; } static int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc, int dev_num) { struct irq_alloc_info info; init_irq_alloc_info(&info, NULL); info.type = X86_IRQ_ALLOC_TYPE_HPET; info.data = hc; info.devid = hpet_dev_id(domain); info.hwirq = dev_num; return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); } static int hpet_clkevt_msi_resume(struct clock_event_device *evt) { struct hpet_channel *hc = clockevent_to_channel(evt); struct irq_data *data = irq_get_irq_data(hc->irq); struct msi_msg msg; /* Restore the MSI msg and unmask the interrupt */ irq_chip_compose_msi_msg(data, &msg); hpet_msi_write(hc, &msg); hpet_msi_unmask(data); return 0; } static irqreturn_t hpet_msi_interrupt_handler(int irq, void *data) { struct hpet_channel *hc = data; struct clock_event_device *evt = &hc->evt; if (!evt->event_handler) { pr_info("Spurious interrupt HPET channel %d\n", hc->num); return IRQ_HANDLED; } evt->event_handler(evt); return IRQ_HANDLED; } static int hpet_setup_msi_irq(struct hpet_channel *hc) { if (request_irq(hc->irq, hpet_msi_interrupt_handler, IRQF_TIMER | IRQF_NOBALANCING, hc->name, hc)) return -1; disable_irq(hc->irq); irq_set_affinity(hc->irq, cpumask_of(hc->cpu)); enable_irq(hc->irq); pr_debug("%s irq %u for MSI\n", hc->name, hc->irq); return 0; } /* Invoked from the hotplug callback on @cpu */ static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu) { struct clock_event_device *evt = &hc->evt; hc->cpu = cpu; per_cpu(cpu_hpet_channel, cpu) = hc; hpet_setup_msi_irq(hc); hpet_init_clockevent(hc, 110); evt->tick_resume = hpet_clkevt_msi_resume; clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA, 0x7FFFFFFF); } static struct hpet_channel *hpet_get_unused_clockevent(void) { int i; for (i = 0; i < hpet_base.nr_channels; i++) { struct hpet_channel *hc = hpet_base.channels + i; if (hc->mode != HPET_MODE_CLOCKEVT || hc->in_use) continue; hc->in_use = 1; return hc; } return NULL; } static int hpet_cpuhp_online(unsigned int cpu) { struct hpet_channel *hc = hpet_get_unused_clockevent(); if (hc) init_one_hpet_msi_clockevent(hc, cpu); return 0; } static int hpet_cpuhp_dead(unsigned int cpu) { struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu); if (!hc) return 0; free_irq(hc->irq, hc); hc->in_use = 0; per_cpu(cpu_hpet_channel, cpu) = NULL; return 0; } static void __init hpet_select_clockevents(void) { unsigned int i; hpet_base.nr_clockevents = 0; /* No point if MSI is disabled or CPU has an Always Running APIC Timer */ if (hpet_msi_disable || boot_cpu_has(X86_FEATURE_ARAT)) return; hpet_print_config(); hpet_domain = 
hpet_create_irq_domain(hpet_blockid); if (!hpet_domain) return; for (i = 0; i < hpet_base.nr_channels; i++) { struct hpet_channel *hc = hpet_base.channels + i; int irq; if (hc->mode != HPET_MODE_UNUSED) continue; /* Only consider HPET channel with MSI support */ if (!(hc->boot_cfg & HPET_TN_FSB_CAP)) continue; sprintf(hc->name, "hpet%d", i); irq = hpet_assign_irq(hpet_domain, hc, hc->num); if (irq <= 0) continue; hc->irq = irq; hc->mode = HPET_MODE_CLOCKEVT; if (++hpet_base.nr_clockevents == num_possible_cpus()) break; } pr_info("%d channels of %d reserved for per-cpu timers\n", hpet_base.nr_channels, hpet_base.nr_clockevents); } #else static inline void hpet_select_clockevents(void) { } #define hpet_cpuhp_online NULL #define hpet_cpuhp_dead NULL #endif /* * Clock source related code */ #if defined(CONFIG_SMP) && defined(CONFIG_64BIT) /* * Reading the HPET counter is a very slow operation. If a large number of * CPUs are trying to access the HPET counter simultaneously, it can cause * massive delays and slow down system performance dramatically. This may * happen when HPET is the default clock source instead of TSC. For a * really large system with hundreds of CPUs, the slowdown may be so * severe, that it can actually crash the system because of a NMI watchdog * soft lockup, for example. * * If multiple CPUs are trying to access the HPET counter at the same time, * we don't actually need to read the counter multiple times. Instead, the * other CPUs can use the counter value read by the first CPU in the group. * * This special feature is only enabled on x86-64 systems. It is unlikely * that 32-bit x86 systems will have enough CPUs to require this feature * with its associated locking overhead. We also need 64-bit atomic read. * * The lock and the HPET value are stored together and can be read in a * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t * is 32 bits in size. */ union hpet_lock { struct { arch_spinlock_t lock; u32 value; }; u64 lockval; }; static union hpet_lock hpet __cacheline_aligned = { { .lock = __ARCH_SPIN_LOCK_UNLOCKED, }, }; static u64 read_hpet(struct clocksource *cs) { unsigned long flags; union hpet_lock old, new; BUILD_BUG_ON(sizeof(union hpet_lock) != 8); /* * Read HPET directly if in NMI. */ if (in_nmi()) return (u64)hpet_readl(HPET_COUNTER); /* * Read the current state of the lock and HPET value atomically. */ old.lockval = READ_ONCE(hpet.lockval); if (arch_spin_is_locked(&old.lock)) goto contended; local_irq_save(flags); if (arch_spin_trylock(&hpet.lock)) { new.value = hpet_readl(HPET_COUNTER); /* * Use WRITE_ONCE() to prevent store tearing. */ WRITE_ONCE(hpet.value, new.value); arch_spin_unlock(&hpet.lock); local_irq_restore(flags); return (u64)new.value; } local_irq_restore(flags); contended: /* * Contended case * -------------- * Wait until the HPET value change or the lock is free to indicate * its value is up-to-date. * * It is possible that old.value has already contained the latest * HPET value while the lock holder was in the process of releasing * the lock. Checking for lock state change will enable us to return * the value immediately instead of waiting for the next HPET reader * to come along. */ do { cpu_relax(); new.lockval = READ_ONCE(hpet.lockval); } while ((new.value == old.value) && arch_spin_is_locked(&new.lock)); return (u64)new.value; } #else /* * For UP or 32-bit. 
*/ static u64 read_hpet(struct clocksource *cs) { return (u64)hpet_readl(HPET_COUNTER); } #endif static struct clocksource clocksource_hpet = { .name = "hpet", .rating = 250, .read = read_hpet, .mask = HPET_MASK, .flags = CLOCK_SOURCE_IS_CONTINUOUS, .resume = hpet_resume_counter, }; /* * AMD SB700 based systems with spread spectrum enabled use a SMM based * HPET emulation to provide proper frequency setting. * * On such systems the SMM code is initialized with the first HPET register * access and takes some time to complete. During this time the config * register reads 0xffffffff. We check for max 1000 loops whether the * config register reads a non-0xffffffff value to make sure that the * HPET is up and running before we proceed any further. * * A counting loop is safe, as the HPET access takes thousands of CPU cycles. * * On non-SB700 based machines this check is only done once and has no * side effects. */ static bool __init hpet_cfg_working(void) { int i; for (i = 0; i < 1000; i++) { if (hpet_readl(HPET_CFG) != 0xFFFFFFFF) return true; } pr_warn("Config register invalid. Disabling HPET\n"); return false; } static bool __init hpet_counting(void) { u64 start, now, t1; hpet_restart_counter(); t1 = hpet_readl(HPET_COUNTER); start = rdtsc(); /* * We don't know the TSC frequency yet, but waiting for * 200000 TSC cycles is safe: * 4 GHz == 50us * 1 GHz == 200us */ do { if (t1 != hpet_readl(HPET_COUNTER)) return true; now = rdtsc(); } while ((now - start) < 200000UL); pr_warn("Counter not counting. HPET disabled\n"); return false; } static bool __init mwait_pc10_supported(void) { unsigned int eax, ebx, ecx, mwait_substates; if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; if (!cpu_feature_enabled(X86_FEATURE_MWAIT)) return false; cpuid(CPUID_LEAF_MWAIT, &eax, &ebx, &ecx, &mwait_substates); return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) && (ecx & CPUID5_ECX_INTERRUPT_BREAK) && (mwait_substates & (0xF << 28)); } /* * Check whether the system supports PC10. If so force disable HPET as that * stops counting in PC10. This check is overbroad as it does not take any * of the following into account: * * - ACPI tables * - Enablement of intel_idle * - Command line arguments which limit intel_idle C-state support * * That's perfectly fine. HPET is a piece of hardware designed by committee * and the only reasons why it is still in use on modern systems is the * fact that it is impossible to reliably query TSC and CPU frequency via * CPUID or firmware. * * If HPET is functional it is useful for calibrating TSC, but this can be * done via PMTIMER as well which seems to be the last remaining timer on * X86/INTEL platforms that has not been completely wreckaged by feature * creep. * * In theory HPET support should be removed altogether, but there are older * systems out there which depend on it because TSC and APIC timer are * dysfunctional in deeper C-states. * * It's only 20 years now that hardware people have been asked to provide * reliable and discoverable facilities which can be used for timekeeping * and per CPU timer interrupts. * * The probability that this problem is going to be solved in the * foreseeable future is close to zero, so the kernel has to be cluttered * with heuristics to keep up with the ever growing amount of hardware and * firmware trainwrecks. Hopefully some day hardware people will understand * that the approach of "This can be fixed in software" is not sustainable. * Hope dies last... 
*/ static bool __init hpet_is_pc10_damaged(void) { unsigned long long pcfg; /* Check whether PC10 substates are supported */ if (!mwait_pc10_supported()) return false; /* Check whether PC10 is enabled in PKG C-state limit */ rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg); if ((pcfg & 0xF) < 8) return false; if (hpet_force_user) { pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n"); return false; } pr_info("HPET dysfunctional in PC10. Force disabled.\n"); boot_hpet_disable = true; return true; } /** * hpet_enable - Try to setup the HPET timer. Returns 1 on success. */ int __init hpet_enable(void) { u32 hpet_period, cfg, id, irq; unsigned int i, channels; struct hpet_channel *hc; u64 freq; if (!is_hpet_capable()) return 0; if (hpet_is_pc10_damaged()) return 0; hpet_set_mapping(); if (!hpet_virt_address) return 0; /* Validate that the config register is working */ if (!hpet_cfg_working()) goto out_nohpet; /* * Read the period and check for a sane value: */ hpet_period = hpet_readl(HPET_PERIOD); if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD) goto out_nohpet; /* The period is a femtoseconds value. Convert it to a frequency. */ freq = FSEC_PER_SEC; do_div(freq, hpet_period); hpet_freq = freq; /* * Read the HPET ID register to retrieve the IRQ routing * information and the number of channels */ id = hpet_readl(HPET_ID); hpet_print_config(); /* This is the HPET channel number which is zero based */ channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1; /* * The legacy routing mode needs at least two channels, tick timer * and the rtc emulation channel. */ if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC) && channels < 2) goto out_nohpet; hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL); if (!hc) { pr_warn("Disabling HPET.\n"); goto out_nohpet; } hpet_base.channels = hc; hpet_base.nr_channels = channels; /* Read, store and sanitize the global configuration */ cfg = hpet_readl(HPET_CFG); hpet_base.boot_cfg = cfg; cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY); hpet_writel(cfg, HPET_CFG); if (cfg) pr_warn("Global config: Unknown bits %#x\n", cfg); /* Read, store and sanitize the per channel configuration */ for (i = 0; i < channels; i++, hc++) { hc->num = i; cfg = hpet_readl(HPET_Tn_CFG(i)); hc->boot_cfg = cfg; irq = (cfg & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT; hc->irq = irq; cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB); hpet_writel(cfg, HPET_Tn_CFG(i)); cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE | HPET_TN_FSB | HPET_TN_FSB_CAP); if (cfg) pr_warn("Channel #%u config: Unknown bits %#x\n", i, cfg); } hpet_print_config(); /* * Validate that the counter is counting. This needs to be done * after sanitizing the config registers to properly deal with * force enabled HPETs. */ if (!hpet_counting()) goto out_nohpet; if (tsc_clocksource_watchdog_disabled()) clocksource_hpet.flags |= CLOCK_SOURCE_MUST_VERIFY; clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq); if (id & HPET_ID_LEGSUP) { hpet_legacy_clockevent_register(&hpet_base.channels[0]); hpet_base.channels[0].mode = HPET_MODE_LEGACY; if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC)) hpet_base.channels[1].mode = HPET_MODE_LEGACY; return 1; } return 0; out_nohpet: kfree(hpet_base.channels); hpet_base.channels = NULL; hpet_base.nr_channels = 0; hpet_clear_mapping(); hpet_address = 0; return 0; } /* * The late initialization runs after the PCI quirks have been invoked * which might have detected a system on which the HPET can be enforced. 
* * Also, the MSI machinery is not working yet when the HPET is initialized * early. * * If the HPET is enabled, then: * * 1) Reserve one channel for /dev/hpet if CONFIG_HPET=y * 2) Reserve up to num_possible_cpus() channels as per CPU clockevents * 3) Setup /dev/hpet if CONFIG_HPET=y * 4) Register hotplug callbacks when clockevents are available */ static __init int hpet_late_init(void) { int ret; if (!hpet_address) { if (!force_hpet_address) return -ENODEV; hpet_address = force_hpet_address; hpet_enable(); } if (!hpet_virt_address) return -ENODEV; hpet_select_device_channel(); hpet_select_clockevents(); hpet_reserve_platform_timers(); hpet_print_config(); if (!hpet_base.nr_clockevents) return 0; ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online", hpet_cpuhp_online, NULL); if (ret) return ret; ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL, hpet_cpuhp_dead); if (ret) goto err_cpuhp; return 0; err_cpuhp: cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE); return ret; } fs_initcall(hpet_late_init); void hpet_disable(void) { unsigned int i; u32 cfg; if (!is_hpet_capable() || !hpet_virt_address) return; /* Restore boot configuration with the enable bit cleared */ cfg = hpet_base.boot_cfg; cfg &= ~HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); /* Restore the channel boot configuration */ for (i = 0; i < hpet_base.nr_channels; i++) hpet_writel(hpet_base.channels[i].boot_cfg, HPET_Tn_CFG(i)); /* If the HPET was enabled at boot time, reenable it */ if (hpet_base.boot_cfg & HPET_CFG_ENABLE) hpet_writel(hpet_base.boot_cfg, HPET_CFG); } #ifdef CONFIG_HPET_EMULATE_RTC /* * HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET * is enabled, we support RTC interrupt functionality in software. * * RTC has 3 kinds of interrupts: * * 1) Update Interrupt - generate an interrupt, every second, when the * RTC clock is updated * 2) Alarm Interrupt - generate an interrupt at a specific time of day * 3) Periodic Interrupt - generate periodic interrupt, with frequencies * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all frequencies in powers of 2) * * (1) and (2) above are implemented using polling at a frequency of 64 Hz: * DEFAULT_RTC_INT_FREQ. * * The exact frequency is a tradeoff between accuracy and interrupt overhead. * * For (3), we use interrupts at 64 Hz, or the user specified periodic frequency, * if it's higher. */ #include <linux/mc146818rtc.h> #include <linux/rtc.h> #define DEFAULT_RTC_INT_FREQ 64 #define DEFAULT_RTC_SHIFT 6 #define RTC_NUM_INTS 1 static unsigned long hpet_rtc_flags; static int hpet_prev_update_sec; static struct rtc_time hpet_alarm_time; static unsigned long hpet_pie_count; static u32 hpet_t1_cmp; static u32 hpet_default_delta; static u32 hpet_pie_delta; static unsigned long hpet_pie_limit; static rtc_irq_handler irq_handler; /* * Check that the HPET counter c1 is ahead of c2 */ static inline int hpet_cnt_ahead(u32 c1, u32 c2) { return (s32)(c2 - c1) < 0; } /* * Registers a IRQ handler. */ int hpet_register_irq_handler(rtc_irq_handler handler) { if (!is_hpet_enabled()) return -ENODEV; if (irq_handler) return -EBUSY; irq_handler = handler; return 0; } EXPORT_SYMBOL_GPL(hpet_register_irq_handler); /* * Deregisters the IRQ handler registered with hpet_register_irq_handler() * and does cleanup. */ void hpet_unregister_irq_handler(rtc_irq_handler handler) { if (!is_hpet_enabled()) return; irq_handler = NULL; hpet_rtc_flags = 0; } EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler); /* * Channel 1 for RTC emulation. 
We use one shot mode, as periodic mode * is not supported by all HPET implementations for channel 1. * * hpet_rtc_timer_init() is called when the rtc is initialized. */ int hpet_rtc_timer_init(void) { unsigned int cfg, cnt, delta; unsigned long flags; if (!is_hpet_enabled()) return 0; if (!hpet_default_delta) { struct clock_event_device *evt = &hpet_base.channels[0].evt; uint64_t clc; clc = (uint64_t) evt->mult * NSEC_PER_SEC; clc >>= evt->shift + DEFAULT_RTC_SHIFT; hpet_default_delta = clc; } if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) delta = hpet_default_delta; else delta = hpet_pie_delta; local_irq_save(flags); cnt = delta + hpet_readl(HPET_COUNTER); hpet_writel(cnt, HPET_T1_CMP); hpet_t1_cmp = cnt; cfg = hpet_readl(HPET_T1_CFG); cfg &= ~HPET_TN_PERIODIC; cfg |= HPET_TN_ENABLE | HPET_TN_32BIT; hpet_writel(cfg, HPET_T1_CFG); local_irq_restore(flags); return 1; } EXPORT_SYMBOL_GPL(hpet_rtc_timer_init); static void hpet_disable_rtc_channel(void) { u32 cfg = hpet_readl(HPET_T1_CFG); cfg &= ~HPET_TN_ENABLE; hpet_writel(cfg, HPET_T1_CFG); } /* * The functions below are called from rtc driver. * Return 0 if HPET is not being used. * Otherwise do the necessary changes and return 1. */ int hpet_mask_rtc_irq_bit(unsigned long bit_mask) { if (!is_hpet_enabled()) return 0; hpet_rtc_flags &= ~bit_mask; if (unlikely(!hpet_rtc_flags)) hpet_disable_rtc_channel(); return 1; } EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit); int hpet_set_rtc_irq_bit(unsigned long bit_mask) { unsigned long oldbits = hpet_rtc_flags; if (!is_hpet_enabled()) return 0; hpet_rtc_flags |= bit_mask; if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE)) hpet_prev_update_sec = -1; if (!oldbits) hpet_rtc_timer_init(); return 1; } EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit); int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec) { if (!is_hpet_enabled()) return 0; hpet_alarm_time.tm_hour = hrs; hpet_alarm_time.tm_min = min; hpet_alarm_time.tm_sec = sec; return 1; } EXPORT_SYMBOL_GPL(hpet_set_alarm_time); int hpet_set_periodic_freq(unsigned long freq) { uint64_t clc; if (!is_hpet_enabled()) return 0; if (freq <= DEFAULT_RTC_INT_FREQ) { hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq; } else { struct clock_event_device *evt = &hpet_base.channels[0].evt; clc = (uint64_t) evt->mult * NSEC_PER_SEC; do_div(clc, freq); clc >>= evt->shift; hpet_pie_delta = clc; hpet_pie_limit = 0; } return 1; } EXPORT_SYMBOL_GPL(hpet_set_periodic_freq); static void hpet_rtc_timer_reinit(void) { unsigned int delta; int lost_ints = -1; if (unlikely(!hpet_rtc_flags)) hpet_disable_rtc_channel(); if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) delta = hpet_default_delta; else delta = hpet_pie_delta; /* * Increment the comparator value until we are ahead of the * current count. 
*/ do { hpet_t1_cmp += delta; hpet_writel(hpet_t1_cmp, HPET_T1_CMP); lost_ints++; } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER))); if (lost_ints) { if (hpet_rtc_flags & RTC_PIE) hpet_pie_count += lost_ints; if (printk_ratelimit()) pr_warn("Lost %d RTC interrupts\n", lost_ints); } } irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) { struct rtc_time curr_time; unsigned long rtc_int_flag = 0; hpet_rtc_timer_reinit(); memset(&curr_time, 0, sizeof(struct rtc_time)); if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) { if (unlikely(mc146818_get_time(&curr_time, 10) < 0)) { pr_err_ratelimited("unable to read current time from RTC\n"); return IRQ_HANDLED; } } if (hpet_rtc_flags & RTC_UIE && curr_time.tm_sec != hpet_prev_update_sec) { if (hpet_prev_update_sec >= 0) rtc_int_flag = RTC_UF; hpet_prev_update_sec = curr_time.tm_sec; } if (hpet_rtc_flags & RTC_PIE && ++hpet_pie_count >= hpet_pie_limit) { rtc_int_flag |= RTC_PF; hpet_pie_count = 0; } if (hpet_rtc_flags & RTC_AIE && (curr_time.tm_sec == hpet_alarm_time.tm_sec) && (curr_time.tm_min == hpet_alarm_time.tm_min) && (curr_time.tm_hour == hpet_alarm_time.tm_hour)) rtc_int_flag |= RTC_AF; if (rtc_int_flag) { rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8)); if (irq_handler) irq_handler(rtc_int_flag, dev_id); } return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(hpet_rtc_interrupt); #endif
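/*
 * Illustrative, standalone sketch (not part of the driver): demonstrates why
 * the hpet_cnt_ahead() comparison used above is safe across the 32-bit HPET
 * counter wraparound. Computing the difference in unsigned arithmetic and
 * testing the sign of the result handles any distance below 2^31 correctly,
 * even when the counter has wrapped. Build as ordinary userspace C.
 */
#include <stdio.h>
#include <stdint.h>

/* Same comparison as hpet_cnt_ahead(): non-zero if c1 is ahead of c2. */
static int cnt_ahead(uint32_t c1, uint32_t c2)
{
	return (int32_t)(c2 - c1) < 0;
}

int main(void)
{
	/* Simple case, no wraparound: 0x2000 is ahead of 0x1000. */
	printf("%d\n", cnt_ahead(0x2000u, 0x1000u));		/* prints 1 */
	/* Wraparound case: the counter ran from 0xfffffff0 past zero to 0x10,
	 * so 0x10 is still "ahead" of 0xfffffff0 although it is numerically
	 * smaller. */
	printf("%d\n", cnt_ahead(0x00000010u, 0xfffffff0u));	/* prints 1 */
	/* The reverse ordering is reported as "not ahead". */
	printf("%d\n", cnt_ahead(0xfffffff0u, 0x00000010u));	/* prints 0 */
	return 0;
}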
// SPDX-License-Identifier: GPL-2.0 /* Lock down the kernel * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/security.h> #include <linux/export.h> #include <linux/lsm_hooks.h> #include <uapi/linux/lsm.h> static enum lockdown_reason kernel_locked_down; static const enum lockdown_reason lockdown_levels[] = {LOCKDOWN_NONE, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_CONFIDENTIALITY_MAX}; /* * Put the kernel into lock-down mode. */ static int lock_kernel_down(const char *where, enum lockdown_reason level) { if (kernel_locked_down >= level) return -EPERM; kernel_locked_down = level; pr_notice("Kernel is locked down from %s; see man kernel_lockdown.7\n", where); return 0; } static int __init lockdown_param(char *level) { if (!level) return -EINVAL; if (strcmp(level, "integrity") == 0) lock_kernel_down("command line", LOCKDOWN_INTEGRITY_MAX); else if (strcmp(level, "confidentiality") == 0) lock_kernel_down("command line", LOCKDOWN_CONFIDENTIALITY_MAX); else return -EINVAL; return 0; } early_param("lockdown", lockdown_param); /** * lockdown_is_locked_down - Find out if the kernel is locked down * @what: Tag to use in notice generated if lockdown is in effect */ static int lockdown_is_locked_down(enum lockdown_reason what) { if (WARN(what >= LOCKDOWN_CONFIDENTIALITY_MAX, "Invalid lockdown reason")) return -EPERM; if (kernel_locked_down >= what) { if (lockdown_reasons[what]) pr_notice_ratelimited("Lockdown: %s: %s is restricted; see man kernel_lockdown.7\n", current->comm, lockdown_reasons[what]); return -EPERM; } return 0; } static struct security_hook_list lockdown_hooks[] __ro_after_init = { LSM_HOOK_INIT(locked_down, lockdown_is_locked_down), }; static const struct lsm_id lockdown_lsmid = { .name = "lockdown", .id = LSM_ID_LOCKDOWN, }; static int __init lockdown_lsm_init(void) { #if defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY) lock_kernel_down("Kernel configuration", LOCKDOWN_INTEGRITY_MAX); #elif defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY) lock_kernel_down("Kernel configuration", LOCKDOWN_CONFIDENTIALITY_MAX); #endif security_add_hooks(lockdown_hooks, ARRAY_SIZE(lockdown_hooks), &lockdown_lsmid); return 0; } static ssize_t lockdown_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char temp[80] = ""; int i, offset = 0; for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) { enum lockdown_reason level = lockdown_levels[i]; if (lockdown_reasons[level]) { const char *label = lockdown_reasons[level]; if (kernel_locked_down == level) offset += sprintf(temp+offset, "[%s] ", label); else offset += sprintf(temp+offset, "%s ", label); } } /* Convert the last space to a newline
if needed. */ if (offset > 0) temp[offset-1] = '\n'; return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); } static ssize_t lockdown_write(struct file *file, const char __user *buf, size_t n, loff_t *ppos) { char *state; int i, len, err = -EINVAL; state = memdup_user_nul(buf, n); if (IS_ERR(state)) return PTR_ERR(state); len = strlen(state); if (len && state[len-1] == '\n') { state[len-1] = '\0'; len--; } for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) { enum lockdown_reason level = lockdown_levels[i]; const char *label = lockdown_reasons[level]; if (label && !strcmp(state, label)) err = lock_kernel_down("securityfs", level); } kfree(state); return err ? err : n; } static const struct file_operations lockdown_ops = { .read = lockdown_read, .write = lockdown_write, }; static int __init lockdown_secfs_init(void) { struct dentry *dentry; dentry = securityfs_create_file("lockdown", 0644, NULL, NULL, &lockdown_ops); return PTR_ERR_OR_ZERO(dentry); } core_initcall(lockdown_secfs_init); #ifdef CONFIG_SECURITY_LOCKDOWN_LSM_EARLY DEFINE_EARLY_LSM(lockdown) = { #else DEFINE_LSM(lockdown) = { #endif .name = "lockdown", .init = lockdown_lsm_init, };
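/*
 * Illustrative, standalone sketch (not part of the LSM): mimics how
 * lockdown_read() above renders the securityfs "lockdown" file, bracketing
 * the currently active level and turning the trailing separator into a
 * newline. The three label strings are placeholders standing in for the
 * lockdown_reasons[] entries at the coarse level indices. Build as ordinary
 * userspace C.
 */
#include <stdio.h>

int main(void)
{
	const char *labels[] = { "none", "integrity", "confidentiality" };
	int current = 1;	/* pretend the kernel is locked down to "integrity" */
	char temp[80] = "";
	int i, offset = 0;

	for (i = 0; i < 3; i++) {
		if (i == current)
			offset += sprintf(temp + offset, "[%s] ", labels[i]);
		else
			offset += sprintf(temp + offset, "%s ", labels[i]);
	}
	/* Convert the last space to a newline, as the LSM does. */
	if (offset > 0)
		temp[offset - 1] = '\n';

	fputs(temp, stdout);	/* prints: none [integrity] confidentiality */
	return 0;
}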
// SPDX-License-Identifier: GPL-2.0-or-later /* * IP Payload Compression Protocol (IPComp) for IPv6 - RFC3173 * * Copyright (C)2003 USAGI/WIDE Project * * Author Mitsuru KANDA <mk@linux-ipv6.org> */ /* * [Memo] * * Outbound: * The compression of IP datagram MUST be done before AH/ESP processing, * fragmentation, and the addition of Hop-by-Hop/Routing header. * * Inbound: * The decompression of IP datagram MUST be done after the reassembly, * AH/ESP processing. */ #define pr_fmt(fmt) "IPv6: " fmt #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/ipcomp.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/pfkeyv2.h> #include <linux/random.h> #include <linux/percpu.h> #include <linux/smp.h> #include <linux/list.h> #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include <net/ip6_route.h> #include <net/icmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/mutex.h> static int ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); __be32 spi; const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data; struct ip_comp_hdr *ipcomph = (struct ip_comp_hdr *)(skb->data + offset); struct xfrm_state *x; if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT) return 0; spi = htonl(ntohs(ipcomph->cpi)); x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); if (!x) return 0; if (type == NDISC_REDIRECT) ip6_redirect(skb, net, skb->dev->ifindex, 0, sock_net_uid(net, NULL)); else ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL)); xfrm_state_put(x); return 0; } static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) { struct net *net = xs_net(x); struct xfrm_state *t = NULL; t = xfrm_state_alloc(net); if (!t) goto out; t->id.proto = IPPROTO_IPV6; t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr); if (!t->id.spi) goto error; memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr)); memcpy(&t->sel, &x->sel, sizeof(t->sel)); t->props.family = AF_INET6; t->props.mode = x->props.mode; memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr)); memcpy(&t->mark, &x->mark, sizeof(t->mark)); t->if_id = x->if_id; if (xfrm_init_state(t)) goto error; atomic_set(&t->tunnel_users, 1); out: return t; error: t->km.state = XFRM_STATE_DEAD; xfrm_state_put(t); t = NULL; goto out; } static int ipcomp6_tunnel_attach(struct xfrm_state *x) { struct net *net = xs_net(x); int err = 0; struct xfrm_state *t = NULL; __be32 spi; u32 mark = x->mark.m & x->mark.v; spi = xfrm6_tunnel_spi_lookup(net,
(xfrm_address_t *)&x->props.saddr); if (spi) t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr, spi, IPPROTO_IPV6, AF_INET6); if (!t) { t = ipcomp6_tunnel_create(x); if (!t) { err = -EINVAL; goto out; } xfrm_state_insert(t); xfrm_state_hold(t); } x->tunnel = t; atomic_inc(&t->tunnel_users); out: return err; } static int ipcomp6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) { int err = -EINVAL; x->props.header_len = 0; switch (x->props.mode) { case XFRM_MODE_TRANSPORT: break; case XFRM_MODE_TUNNEL: x->props.header_len += sizeof(struct ipv6hdr); break; default: NL_SET_ERR_MSG(extack, "Unsupported XFRM mode for IPcomp"); goto out; } err = ipcomp_init_state(x, extack); if (err) goto out; if (x->props.mode == XFRM_MODE_TUNNEL) { err = ipcomp6_tunnel_attach(x); if (err) { NL_SET_ERR_MSG(extack, "Kernel error: failed to initialize the associated state"); goto out; } } err = 0; out: return err; } static int ipcomp6_rcv_cb(struct sk_buff *skb, int err) { return 0; } static const struct xfrm_type ipcomp6_type = { .owner = THIS_MODULE, .proto = IPPROTO_COMP, .init_state = ipcomp6_init_state, .destructor = ipcomp_destroy, .input = ipcomp_input, .output = ipcomp_output, }; static struct xfrm6_protocol ipcomp6_protocol = { .handler = xfrm6_rcv, .input_handler = xfrm_input, .cb_handler = ipcomp6_rcv_cb, .err_handler = ipcomp6_err, .priority = 0, }; static int __init ipcomp6_init(void) { if (xfrm_register_type(&ipcomp6_type, AF_INET6) < 0) { pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (xfrm6_protocol_register(&ipcomp6_protocol, IPPROTO_COMP) < 0) { pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&ipcomp6_type, AF_INET6); return -EAGAIN; } return 0; } static void __exit ipcomp6_fini(void) { if (xfrm6_protocol_deregister(&ipcomp6_protocol, IPPROTO_COMP) < 0) pr_info("%s: can't remove protocol\n", __func__); xfrm_unregister_type(&ipcomp6_type, AF_INET6); } module_init(ipcomp6_init); module_exit(ipcomp6_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IP Payload Compression Protocol (IPComp) for IPv6 - RFC3173"); MODULE_AUTHOR("Mitsuru KANDA <mk@linux-ipv6.org>"); MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_COMP);
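/*
 * Illustrative, standalone sketch (not part of the module): shows the RFC 3173
 * IPComp header layout that ipcomp6_err() above parses at "offset", and how a
 * 16-bit CPI is widened into the 32-bit SPI used for the xfrm state lookup
 * (spi = htonl(ntohs(cpi))). The struct below is a local stand-in for
 * struct ip_comp_hdr, not the kernel definition. Build as ordinary userspace C.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct ipcomp_hdr {		/* 4 bytes on the wire */
	uint8_t  nexthdr;	/* protocol of the compressed payload */
	uint8_t  flags;		/* reserved, must be zero */
	uint16_t cpi;		/* compression parameter index, network order */
};

int main(void)
{
	/* A header as it might appear in a packet: next header TCP (6),
	 * CPI 2, which is the well-known CPI for DEFLATE in RFC 3173. */
	struct ipcomp_hdr h = { .nexthdr = 6, .flags = 0, .cpi = htons(2) };

	/* Same widening as ipcomp6_err(): 16-bit CPI -> 32-bit SPI. */
	uint32_t spi = htonl(ntohs(h.cpi));

	printf("cpi=%u spi(host order)=%u\n",
	       (unsigned)ntohs(h.cpi), (unsigned)ntohl(spi));
	return 0;
}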
// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * emulate.c * * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. * * Copyright (c) 2005 Keir Fraser * * Linux coding style, mod r/m decoder, segment base fixes, real-mode * privileged instructions: * * Copyright (C) 2006 Qumranet * Copyright 2010 Red Hat, Inc. and/or its affiliates.
* * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kvm_host.h> #include "kvm_cache_regs.h" #include "kvm_emulate.h" #include <linux/stringify.h> #include <asm/debugreg.h> #include <asm/nospec-branch.h> #include <asm/ibt.h> #include "x86.h" #include "tss.h" #include "mmu.h" #include "pmu.h" /* * Operand types */ #define OpNone 0ull #define OpImplicit 1ull /* No generic decode */ #define OpReg 2ull /* Register */ #define OpMem 3ull /* Memory */ #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */ #define OpDI 5ull /* ES:DI/EDI/RDI */ #define OpMem64 6ull /* Memory, 64-bit */ #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */ #define OpDX 8ull /* DX register */ #define OpCL 9ull /* CL register (for shifts) */ #define OpImmByte 10ull /* 8-bit sign extended immediate */ #define OpOne 11ull /* Implied 1 */ #define OpImm 12ull /* Sign extended up to 32-bit immediate */ #define OpMem16 13ull /* Memory operand (16-bit). */ #define OpMem32 14ull /* Memory operand (32-bit). */ #define OpImmU 15ull /* Immediate operand, zero extended */ #define OpSI 16ull /* SI/ESI/RSI */ #define OpImmFAddr 17ull /* Immediate far address */ #define OpMemFAddr 18ull /* Far address in memory */ #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */ #define OpES 20ull /* ES */ #define OpCS 21ull /* CS */ #define OpSS 22ull /* SS */ #define OpDS 23ull /* DS */ #define OpFS 24ull /* FS */ #define OpGS 25ull /* GS */ #define OpMem8 26ull /* 8-bit zero extended memory operand */ #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */ #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) /* * Opcode effective-address decode tables. * Note that we only emulate instructions that have at least one memory * operand (excluding implicit stack references). We assume that stack * references and instruction fetches will never occur in special memory * areas that require emulation. So, for example, 'mov <imm>,<reg>' need * not be handled. */ /* Operand sizes: 8-bit operands or specified/overridden size. */ #define ByteOp (1<<0) /* 8-bit operands. */ /* Destination operand type. */ #define DstShift 1 #define ImplicitOps (OpImplicit << DstShift) #define DstReg (OpReg << DstShift) #define DstMem (OpMem << DstShift) #define DstAcc (OpAcc << DstShift) #define DstDI (OpDI << DstShift) #define DstMem64 (OpMem64 << DstShift) #define DstMem16 (OpMem16 << DstShift) #define DstImmUByte (OpImmUByte << DstShift) #define DstDX (OpDX << DstShift) #define DstAccLo (OpAccLo << DstShift) #define DstMask (OpMask << DstShift) /* Source operand type. 
*/ #define SrcShift 6 #define SrcNone (OpNone << SrcShift) #define SrcReg (OpReg << SrcShift) #define SrcMem (OpMem << SrcShift) #define SrcMem16 (OpMem16 << SrcShift) #define SrcMem32 (OpMem32 << SrcShift) #define SrcImm (OpImm << SrcShift) #define SrcImmByte (OpImmByte << SrcShift) #define SrcOne (OpOne << SrcShift) #define SrcImmUByte (OpImmUByte << SrcShift) #define SrcImmU (OpImmU << SrcShift) #define SrcSI (OpSI << SrcShift) #define SrcXLat (OpXLat << SrcShift) #define SrcImmFAddr (OpImmFAddr << SrcShift) #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) #define SrcImmU16 (OpImmU16 << SrcShift) #define SrcImm64 (OpImm64 << SrcShift) #define SrcDX (OpDX << SrcShift) #define SrcMem8 (OpMem8 << SrcShift) #define SrcAccHi (OpAccHi << SrcShift) #define SrcMask (OpMask << SrcShift) #define BitOp (1<<11) #define MemAbs (1<<12) /* Memory operand is absolute displacement */ #define String (1<<13) /* String instruction (rep capable) */ #define Stack (1<<14) /* Stack instruction (push/pop) */ #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ #define Escape (5<<15) /* Escape to coprocessor instruction */ #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */ #define ModeDual (7<<15) /* Different instruction for 32/64 bit */ #define Sse (1<<18) /* SSE Vector instruction */ /* Generic ModRM decode. */ #define ModRM (1<<19) /* Destination is only written; never read. */ #define Mov (1<<20) /* Misc flags */ #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */ #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ #define Undefined (1<<25) /* No Such Instruction */ #define Lock (1<<26) /* lock prefix is allowed for the instruction */ #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ #define No64 (1<<28) #define PageTable (1 << 29) /* instruction used to write page table */ #define NotImpl (1 << 30) /* instruction is not implemented */ /* Source 2 operand type */ #define Src2Shift (31) #define Src2None (OpNone << Src2Shift) #define Src2Mem (OpMem << Src2Shift) #define Src2CL (OpCL << Src2Shift) #define Src2ImmByte (OpImmByte << Src2Shift) #define Src2One (OpOne << Src2Shift) #define Src2Imm (OpImm << Src2Shift) #define Src2ES (OpES << Src2Shift) #define Src2CS (OpCS << Src2Shift) #define Src2SS (OpSS << Src2Shift) #define Src2DS (OpDS << Src2Shift) #define Src2FS (OpFS << Src2Shift) #define Src2GS (OpGS << Src2Shift) #define Src2Mask (OpMask << Src2Shift) #define Mmx ((u64)1 << 40) /* MMX Vector instruction */ #define AlignMask ((u64)7 << 41) #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */ #define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */ #define Avx ((u64)3 << 41) /* Advanced Vector Extensions */ #define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. 
FXSAVE) */ #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */ #define NoWrite ((u64)1 << 45) /* No writeback */ #define SrcWrite ((u64)1 << 46) /* Write back src operand */ #define NoMod ((u64)1 << 47) /* Mod field is ignored */ #define Intercept ((u64)1 << 48) /* Has valid intercept field */ #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ #define NearBranch ((u64)1 << 52) /* Near branches */ #define No16 ((u64)1 << 53) /* No 16 bit operand */ #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */ #define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operand */ #define IsBranch ((u64)1 << 56) /* Instruction is considered a branch. */ #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) #define X2(x...) x, x #define X3(x...) X2(x), x #define X4(x...) X2(x), X2(x) #define X5(x...) X4(x), x #define X6(x...) X4(x), X2(x) #define X7(x...) X4(x), X3(x) #define X8(x...) X4(x), X4(x) #define X16(x...) X8(x), X8(x) struct opcode { u64 flags; u8 intercept; u8 pad[7]; union { int (*execute)(struct x86_emulate_ctxt *ctxt); const struct opcode *group; const struct group_dual *gdual; const struct gprefix *gprefix; const struct escape *esc; const struct instr_dual *idual; const struct mode_dual *mdual; void (*fastop)(struct fastop *fake); } u; int (*check_perm)(struct x86_emulate_ctxt *ctxt); }; struct group_dual { struct opcode mod012[8]; struct opcode mod3[8]; }; struct gprefix { struct opcode pfx_no; struct opcode pfx_66; struct opcode pfx_f2; struct opcode pfx_f3; }; struct escape { struct opcode op[8]; struct opcode high[64]; }; struct instr_dual { struct opcode mod012; struct opcode mod3; }; struct mode_dual { struct opcode mode32; struct opcode mode64; }; #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a enum x86_transfer_type { X86_TRANSFER_NONE, X86_TRANSFER_CALL_JMP, X86_TRANSFER_RET, X86_TRANSFER_TASK_SWITCH, }; static void writeback_registers(struct x86_emulate_ctxt *ctxt) { unsigned long dirty = ctxt->regs_dirty; unsigned reg; for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS) ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); } static void invalidate_registers(struct x86_emulate_ctxt *ctxt) { ctxt->regs_dirty = 0; ctxt->regs_valid = 0; } /* * These EFLAGS bits are restored from saved value during emulation, and * any changes are written back to the saved value after emulation. */ #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\ X86_EFLAGS_PF|X86_EFLAGS_CF) #ifdef CONFIG_X86_64 #define ON64(x) x #else #define ON64(x) #endif /* * fastop functions have a special calling convention: * * dst: rax (in/out) * src: rdx (in/out) * src2: rcx (in) * flags: rflags (in/out) * ex: rsi (in:fastop pointer, out:zero if exception) * * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for * different operand sizes can be reached by calculation, rather than a jump * table (which would be bigger than the code). * * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR * and 1 for the straight line speculation INT3, leaves 7 bytes for the * body of the function. Currently none is larger than 4. 
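 *
 * A rough illustration, not a definitive statement of the dispatch code:
 * because the b/w/l/q variants of an operation are emitted back to back and
 * each one is exactly FASTOP_SIZE bytes, callers are expected to select a
 * variant purely by offset arithmetic, e.g. something along the lines of
 *
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;	(1/2/4/8 bytes -> slot 0/1/2/3)
 *
 * test_cc() further below relies on the same layout to index the SETcc
 * block by condition code: em_setcc + FASTOP_SIZE * (condition & 0xf).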
*/ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); #define FASTOP_SIZE 16 #define __FOP_FUNC(name) \ ".align " __stringify(FASTOP_SIZE) " \n\t" \ ".type " name ", @function \n\t" \ name ":\n\t" \ ASM_ENDBR \ IBT_NOSEAL(name) #define FOP_FUNC(name) \ __FOP_FUNC(#name) #define __FOP_RET(name) \ "11: " ASM_RET \ ".size " name ", .-" name "\n\t" #define FOP_RET(name) \ __FOP_RET(#name) #define __FOP_START(op, align) \ extern void em_##op(struct fastop *fake); \ asm(".pushsection .text, \"ax\" \n\t" \ ".global em_" #op " \n\t" \ ".align " __stringify(align) " \n\t" \ "em_" #op ":\n\t" #define FOP_START(op) __FOP_START(op, FASTOP_SIZE) #define FOP_END \ ".popsection") #define __FOPNOP(name) \ __FOP_FUNC(name) \ __FOP_RET(name) #define FOPNOP() \ __FOPNOP(__stringify(__UNIQUE_ID(nop))) #define FOP1E(op, dst) \ __FOP_FUNC(#op "_" #dst) \ "10: " #op " %" #dst " \n\t" \ __FOP_RET(#op "_" #dst) #define FOP1EEX(op, dst) \ FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi) #define FASTOP1(op) \ FOP_START(op) \ FOP1E(op##b, al) \ FOP1E(op##w, ax) \ FOP1E(op##l, eax) \ ON64(FOP1E(op##q, rax)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m) */ #define FASTOP1SRC2(op, name) \ FOP_START(name) \ FOP1E(op, cl) \ FOP1E(op, cx) \ FOP1E(op, ecx) \ ON64(FOP1E(op, rcx)) \ FOP_END /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */ #define FASTOP1SRC2EX(op, name) \ FOP_START(name) \ FOP1EEX(op, cl) \ FOP1EEX(op, cx) \ FOP1EEX(op, ecx) \ ON64(FOP1EEX(op, rcx)) \ FOP_END #define FOP2E(op, dst, src) \ __FOP_FUNC(#op "_" #dst "_" #src) \ #op " %" #src ", %" #dst " \n\t" \ __FOP_RET(#op "_" #dst "_" #src) #define FASTOP2(op) \ FOP_START(op) \ FOP2E(op##b, al, dl) \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, word only */ #define FASTOP2W(op) \ FOP_START(op) \ FOPNOP() \ FOP2E(op##w, ax, dx) \ FOP2E(op##l, eax, edx) \ ON64(FOP2E(op##q, rax, rdx)) \ FOP_END /* 2 operand, src is CL */ #define FASTOP2CL(op) \ FOP_START(op) \ FOP2E(op##b, al, cl) \ FOP2E(op##w, ax, cl) \ FOP2E(op##l, eax, cl) \ ON64(FOP2E(op##q, rax, cl)) \ FOP_END /* 2 operand, src and dest are reversed */ #define FASTOP2R(op, name) \ FOP_START(name) \ FOP2E(op##b, dl, al) \ FOP2E(op##w, dx, ax) \ FOP2E(op##l, edx, eax) \ ON64(FOP2E(op##q, rdx, rax)) \ FOP_END #define FOP3E(op, dst, src, src2) \ __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \ #op " %" #src2 ", %" #src ", %" #dst " \n\t"\ __FOP_RET(#op "_" #dst "_" #src "_" #src2) /* 3-operand, word-only, src2=cl */ #define FASTOP3WCL(op) \ FOP_START(op) \ FOPNOP() \ FOP3E(op##w, ax, dx, cl) \ FOP3E(op##l, eax, edx, cl) \ ON64(FOP3E(op##q, rax, rdx, cl)) \ FOP_END /* Special case for SETcc - 1 instruction per cc */ #define FOP_SETCC(op) \ FOP_FUNC(op) \ #op " %al \n\t" \ FOP_RET(op) FOP_START(setcc) FOP_SETCC(seto) FOP_SETCC(setno) FOP_SETCC(setc) FOP_SETCC(setnc) FOP_SETCC(setz) FOP_SETCC(setnz) FOP_SETCC(setbe) FOP_SETCC(setnbe) FOP_SETCC(sets) FOP_SETCC(setns) FOP_SETCC(setp) FOP_SETCC(setnp) FOP_SETCC(setl) FOP_SETCC(setnl) FOP_SETCC(setle) FOP_SETCC(setnle) FOP_END; FOP_START(salc) FOP_FUNC(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET(salc) FOP_END; /* * XXX: inoutclob user must know where the argument is being expanded. * Using asm goto would allow us to remove _fault. */ #define asm_safe(insn, inoutclob...) \ ({ \ int _fault = 0; \ \ asm volatile("1:" insn "\n" \ "2:\n" \ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \ : [_fault] "+r"(_fault) inoutclob ); \ \ _fault ? 
X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \ }) static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage) { struct x86_instruction_info info = { .intercept = intercept, .rep_prefix = ctxt->rep_prefix, .modrm_mod = ctxt->modrm_mod, .modrm_reg = ctxt->modrm_reg, .modrm_rm = ctxt->modrm_rm, .src_val = ctxt->src.val64, .dst_val = ctxt->dst.val64, .src_bytes = ctxt->src.bytes, .dst_bytes = ctxt->dst.bytes, .src_type = ctxt->src.type, .dst_type = ctxt->dst.type, .ad_bytes = ctxt->ad_bytes, .rip = ctxt->eip, .next_rip = ctxt->_eip, }; return ctxt->ops->intercept(ctxt, &info, stage); } static void assign_masked(ulong *dest, ulong src, ulong mask) { *dest = (*dest & ~mask) | (src & mask); } static void assign_register(unsigned long *reg, u64 val, int bytes) { /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ switch (bytes) { case 1: *(u8 *)reg = (u8)val; break; case 2: *(u16 *)reg = (u16)val; break; case 4: *reg = (u32)val; break; /* 64b: zero-extend */ case 8: *reg = val; break; } } static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { return (1UL << (ctxt->ad_bytes << 3)) - 1; } static ulong stack_mask(struct x86_emulate_ctxt *ctxt) { u16 sel; struct desc_struct ss; if (ctxt->mode == X86EMUL_MODE_PROT64) return ~0UL; ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS); return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */ } static int stack_size(struct x86_emulate_ctxt *ctxt) { return (__fls(stack_mask(ctxt)) + 1) >> 3; } /* Access/update address held in a register, based on addressing mode. */ static inline unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg) { if (ctxt->ad_bytes == sizeof(unsigned long)) return reg; else return reg & ad_mask(ctxt); } static inline unsigned long register_address(struct x86_emulate_ctxt *ctxt, int reg) { return address_mask(ctxt, reg_read(ctxt, reg)); } static void masked_increment(ulong *reg, ulong mask, int inc) { assign_masked(reg, *reg + inc, mask); } static inline void register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc) { ulong *preg = reg_rmw(ctxt, reg); assign_register(preg, *preg + inc, ctxt->ad_bytes); } static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) { masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); } static u32 desc_limit_scaled(struct desc_struct *desc) { u32 limit = get_desc_limit(desc); return desc->g ? 
(limit << 12) | 0xfff : limit; } static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) { if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) return 0; return ctxt->ops->get_cached_segment_base(ctxt, seg); } static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, u32 error, bool valid) { if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt)) return X86EMUL_UNHANDLEABLE; ctxt->exception.vector = vec; ctxt->exception.error_code = error; ctxt->exception.error_code_valid = valid; return X86EMUL_PROPAGATE_FAULT; } static int emulate_db(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DB_VECTOR, 0, false); } static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, GP_VECTOR, err, true); } static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, SS_VECTOR, err, true); } static int emulate_ud(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, UD_VECTOR, 0, false); } static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) { return emulate_exception(ctxt, TS_VECTOR, err, true); } static int emulate_de(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, DE_VECTOR, 0, false); } static int emulate_nm(struct x86_emulate_ctxt *ctxt) { return emulate_exception(ctxt, NM_VECTOR, 0, false); } static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) { u16 selector; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); return selector; } static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, unsigned seg) { u16 dummy; u32 base3; struct desc_struct desc; ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); } static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt) { return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48; } static inline bool emul_is_noncanonical_address(u64 la, struct x86_emulate_ctxt *ctxt, unsigned int flags) { return !ctxt->ops->is_canonical_addr(ctxt, la, flags); } /* * x86 defines three classes of vector instructions: explicitly * aligned, explicitly unaligned, and the rest, which change behaviour * depending on whether they're AVX encoded or not. * * Also included is CMPXCHG16B which is not a vector instruction, yet it is * subject to the same check. FXSAVE and FXRSTOR are checked here too as their * 512 bytes of data must be aligned to a 16 byte boundary. 
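 *
 * In short, as implemented by insn_alignment() just below: accesses of
 * fewer than 16 bytes are never alignment-checked, Unaligned and Avx
 * encodings skip the check, Aligned16 requires a 16-byte boundary, and
 * Aligned (the default) requires alignment to the operand size.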
*/ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size) { u64 alignment = ctxt->d & AlignMask; if (likely(size < 16)) return 1; switch (alignment) { case Unaligned: case Avx: return 1; case Aligned16: return 16; case Aligned: default: return size; } } static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned *max_size, unsigned size, enum x86emul_mode mode, ulong *linear, unsigned int flags) { struct desc_struct desc; bool usable; ulong la; u32 lim; u16 sel; u8 va_bits; la = seg_base(ctxt, addr.seg) + addr.ea; *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: *linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags); va_bits = ctxt_virt_addr_bits(ctxt); if (!__is_canonical_address(la, va_bits)) goto bad; *max_size = min_t(u64, ~0u, (1ull << va_bits) - la); if (size > *max_size) goto bad; break; default: *linear = la = (u32)la; usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, addr.seg); if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && (flags & X86EMUL_F_WRITE)) goto bad; /* unreadable code segment */ if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if (!(desc.type & 8) && (desc.type & 4)) { /* expand-down segment */ if (addr.ea <= lim) goto bad; lim = desc.d ? 0xffffffff : 0xffff; } if (addr.ea > lim) goto bad; if (lim == 0xffffffff) *max_size = ~0u; else { *max_size = (u64)lim + 1 - addr.ea; if (size > *max_size) goto bad; } break; } if (la & (insn_alignment(ctxt, size) - 1)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; bad: if (addr.seg == VCPU_SREG_SS) return emulate_ss(ctxt, 0); else return emulate_gp(ctxt, 0); } static int linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned size, bool write, ulong *linear) { unsigned max_size; return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear, write ? X86EMUL_F_WRITE : 0); } static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) { ulong linear; int rc; unsigned max_size; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = dst }; if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear, X86EMUL_F_FETCH); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; } static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt) { u64 efer; struct desc_struct cs; u16 selector; u32 base3; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) { /* Real mode. cpu must not have long mode active */ if (efer & EFER_LMA) return X86EMUL_UNHANDLEABLE; ctxt->mode = X86EMUL_MODE_REAL; return X86EMUL_CONTINUE; } if (ctxt->eflags & X86_EFLAGS_VM) { /* Protected/VM86 mode. cpu must not have long mode active */ if (efer & EFER_LMA) return X86EMUL_UNHANDLEABLE; ctxt->mode = X86EMUL_MODE_VM86; return X86EMUL_CONTINUE; } if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS)) return X86EMUL_UNHANDLEABLE; if (efer & EFER_LMA) { if (cs.l) { /* Proper long mode */ ctxt->mode = X86EMUL_MODE_PROT64; } else if (cs.d) { /* 32 bit compatibility mode*/ ctxt->mode = X86EMUL_MODE_PROT32; } else { ctxt->mode = X86EMUL_MODE_PROT16; } } else { /* Legacy 32 bit / 16 bit mode */ ctxt->mode = cs.d ? 
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; } return X86EMUL_CONTINUE; } static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { return assign_eip(ctxt, dst); } static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst) { int rc = emulator_recalc_and_set_mode(ctxt); if (rc != X86EMUL_CONTINUE) return rc; return assign_eip(ctxt, dst); } static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { return assign_eip_near(ctxt, ctxt->_eip + rel); } static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear, void *data, unsigned size) { return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true); } static int linear_write_system(struct x86_emulate_ctxt *ctxt, ulong linear, void *data, unsigned int size) { return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true); } static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false); } static int segmented_write_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned int size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false); } /* * Prefetch the remaining bytes of the instruction without crossing page * boundary if they are not in fetch_cache yet. */ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) { int rc; unsigned size, max_size; unsigned long linear; int cur_size = ctxt->fetch.end - ctxt->fetch.data; struct segmented_address addr = { .seg = VCPU_SREG_CS, .ea = ctxt->eip + cur_size }; /* * We do not know exactly how many bytes will be needed, and * __linearize is expensive, so fetch as much as possible. We * just have to avoid going beyond the 15 byte limit, the end * of the segment, or the end of the page. * * __linearize is called with size 0 so that it does not do any * boundary check itself. Instead, we use max_size to check * against op_size. */ rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear, X86EMUL_F_FETCH); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; size = min_t(unsigned, 15UL ^ cur_size, max_size); size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); /* * One instruction can only straddle two pages, * and one has been loaded at the beginning of * x86_decode_insn. So, if not enough bytes * still, we must have hit the 15-byte boundary. */ if (unlikely(size < op_size)) return emulate_gp(ctxt, 0); rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, size, &ctxt->exception); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; ctxt->fetch.end += size; return X86EMUL_CONTINUE; } static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, unsigned size) { unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr; if (unlikely(done_size < size)) return __do_insn_fetch_bytes(ctxt, size - done_size); else return X86EMUL_CONTINUE; } /* Fetch next part of the instruction being emulated. 
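 *
 * A typical use in the decode path would look something like
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *
 * which, on success, copies one byte out of the fetch cache, advances
 * ctxt->_eip and the cache pointer, and on failure jumps to the caller's
 * "done" label through the embedded rc check.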
*/ #define insn_fetch(_type, _ctxt) \ ({ _type _x; \ \ rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += sizeof(_type); \ memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \ ctxt->fetch.ptr += sizeof(_type); \ _x; \ }) #define insn_fetch_arr(_arr, _size, _ctxt) \ ({ \ rc = do_insn_fetch_bytes(_ctxt, _size); \ if (rc != X86EMUL_CONTINUE) \ goto done; \ ctxt->_eip += (_size); \ memcpy(_arr, ctxt->fetch.ptr, _size); \ ctxt->fetch.ptr += (_size); \ }) /* * Given the 'reg' portion of a ModRM byte, and a register block, return a * pointer into the block that addresses the relevant register. * @highbyte_regs specifies whether to decode AH,CH,DH,BH. */ static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, int byteop) { void *p; int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; else p = reg_rmw(ctxt, modrm_reg); return p; } static int read_descriptor(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, u16 *size, unsigned long *address, int op_bytes) { int rc; if (op_bytes == 2) op_bytes = 3; *address = 0; rc = segmented_read_std(ctxt, addr, size, 2); if (rc != X86EMUL_CONTINUE) return rc; addr.ea += 2; rc = segmented_read_std(ctxt, addr, address, op_bytes); return rc; } FASTOP2(add); FASTOP2(or); FASTOP2(adc); FASTOP2(sbb); FASTOP2(and); FASTOP2(sub); FASTOP2(xor); FASTOP2(cmp); FASTOP2(test); FASTOP1SRC2(mul, mul_ex); FASTOP1SRC2(imul, imul_ex); FASTOP1SRC2EX(div, div_ex); FASTOP1SRC2EX(idiv, idiv_ex); FASTOP3WCL(shld); FASTOP3WCL(shrd); FASTOP2W(imul); FASTOP1(not); FASTOP1(neg); FASTOP1(inc); FASTOP1(dec); FASTOP2CL(rol); FASTOP2CL(ror); FASTOP2CL(rcl); FASTOP2CL(rcr); FASTOP2CL(shl); FASTOP2CL(shr); FASTOP2CL(sar); FASTOP2W(bsf); FASTOP2W(bsr); FASTOP2W(bt); FASTOP2W(bts); FASTOP2W(btr); FASTOP2W(btc); FASTOP2(xadd); FASTOP2R(cmp, cmp_r); static int em_bsf_c(struct x86_emulate_ctxt *ctxt) { /* If src is zero, do not writeback, but update flags */ if (ctxt->src.val == 0) ctxt->dst.type = OP_NONE; return fastop(ctxt, em_bsf); } static int em_bsr_c(struct x86_emulate_ctxt *ctxt) { /* If src is zero, do not writeback, but update flags */ if (ctxt->src.val == 0) ctxt->dst.type = OP_NONE; return fastop(ctxt, em_bsr); } static __always_inline u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf); flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF; asm("push %[flags]; popf; " CALL_NOSPEC : "=a"(rc), ASM_CALL_CONSTRAINT : [thunk_target]"r"(fop), [flags]"r"(flags)); return rc; } static void fetch_register_operand(struct operand *op) { switch (op->bytes) { case 1: op->val = *(u8 *)op->addr.reg; break; case 2: op->val = *(u16 *)op->addr.reg; break; case 4: op->val = *(u32 *)op->addr.reg; break; case 8: op->val = *(u64 *)op->addr.reg; break; } } static int em_fninit(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); kvm_fpu_get(); asm volatile("fninit"); kvm_fpu_put(); return X86EMUL_CONTINUE; } static int em_fnstcw(struct x86_emulate_ctxt *ctxt) { u16 fcw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); kvm_fpu_get(); asm volatile("fnstcw %0": "+m"(fcw)); kvm_fpu_put(); ctxt->dst.val = fcw; return X86EMUL_CONTINUE; } static int em_fnstsw(struct x86_emulate_ctxt *ctxt) { u16 fsw; if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | 
X86_CR0_EM)) return emulate_nm(ctxt); kvm_fpu_get(); asm volatile("fnstsw %0": "+m"(fsw)); kvm_fpu_put(); ctxt->dst.val = fsw; return X86EMUL_CONTINUE; } static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { unsigned int reg; if (ctxt->d & ModRM) reg = ctxt->modrm_reg; else reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = reg; kvm_read_sse_reg(reg, &op->vec_val); return; } if (ctxt->d & Mmx) { reg &= 7; op->type = OP_MM; op->bytes = 8; op->addr.mm = reg; return; } op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); fetch_register_operand(op); op->orig_val = op->val; } static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg) { if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP) ctxt->modrm_seg = VCPU_SREG_SS; } static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op) { u8 sib; int index_reg, base_reg, scale; int rc = X86EMUL_CONTINUE; ulong modrm_ea = 0; ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); ctxt->modrm_seg = VCPU_SREG_DS; if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; op->addr.xmm = ctxt->modrm_rm; kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val); return rc; } if (ctxt->d & Mmx) { op->type = OP_MM; op->bytes = 8; op->addr.mm = ctxt->modrm_rm & 7; return rc; } fetch_register_operand(op); return rc; } op->type = OP_MEM; if (ctxt->ad_bytes == 2) { unsigned bx = reg_read(ctxt, VCPU_REGS_RBX); unsigned bp = reg_read(ctxt, VCPU_REGS_RBP); unsigned si = reg_read(ctxt, VCPU_REGS_RSI); unsigned di = reg_read(ctxt, VCPU_REGS_RDI); /* 16-bit ModR/M decode. */ switch (ctxt->modrm_mod) { case 0: if (ctxt->modrm_rm == 6) modrm_ea += insn_fetch(u16, ctxt); break; case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(u16, ctxt); break; } switch (ctxt->modrm_rm) { case 0: modrm_ea += bx + si; break; case 1: modrm_ea += bx + di; break; case 2: modrm_ea += bp + si; break; case 3: modrm_ea += bp + di; break; case 4: modrm_ea += si; break; case 5: modrm_ea += di; break; case 6: if (ctxt->modrm_mod != 0) modrm_ea += bp; break; case 7: modrm_ea += bx; break; } if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 || (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0)) ctxt->modrm_seg = VCPU_SREG_SS; modrm_ea = (u16)modrm_ea; } else { /* 32/64-bit ModR/M decode. 
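 *
 * Encoding reminder, matching the unpacking just below: when
 * (modrm_rm & 7) == 4 a SIB byte follows, with the scale in bits 7:6,
 * the index register in bits 5:3 and the base register in bits 2:0.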
*/ if ((ctxt->modrm_rm & 7) == 4) { sib = insn_fetch(u8, ctxt); index_reg |= (sib >> 3) & 7; base_reg |= sib & 7; scale = sib >> 6; if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0) modrm_ea += insn_fetch(s32, ctxt); else { modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); /* Increment ESP on POP [ESP] */ if ((ctxt->d & IncSP) && base_reg == VCPU_REGS_RSP) modrm_ea += ctxt->op_bytes; } if (index_reg != 4) modrm_ea += reg_read(ctxt, index_reg) << scale; } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) { modrm_ea += insn_fetch(s32, ctxt); if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->rip_relative = 1; } else { base_reg = ctxt->modrm_rm; modrm_ea += reg_read(ctxt, base_reg); adjust_modrm_seg(ctxt, base_reg); } switch (ctxt->modrm_mod) { case 1: modrm_ea += insn_fetch(s8, ctxt); break; case 2: modrm_ea += insn_fetch(s32, ctxt); break; } } op->addr.mem.ea = modrm_ea; if (ctxt->ad_bytes != 8) ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; done: return rc; } static int decode_abs(struct x86_emulate_ctxt *ctxt, struct operand *op) { int rc = X86EMUL_CONTINUE; op->type = OP_MEM; switch (ctxt->ad_bytes) { case 2: op->addr.mem.ea = insn_fetch(u16, ctxt); break; case 4: op->addr.mem.ea = insn_fetch(u32, ctxt); break; case 8: op->addr.mem.ea = insn_fetch(u64, ctxt); break; } done: return rc; } static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) { long sv = 0, mask; if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { mask = ~((long)ctxt->dst.bytes * 8 - 1); if (ctxt->src.bytes == 2) sv = (s16)ctxt->src.val & (s16)mask; else if (ctxt->src.bytes == 4) sv = (s32)ctxt->src.val & (s32)mask; else sv = (s64)ctxt->src.val & (s64)mask; ctxt->dst.addr.mem.ea = address_mask(ctxt, ctxt->dst.addr.mem.ea + (sv >> 3)); } /* only subword offset */ ctxt->src.val &= (ctxt->dst.bytes << 3) - 1; } static int read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *dest, unsigned size) { int rc; struct read_cache *mc = &ctxt->mem_read; if (mc->pos < mc->end) goto read_cached; if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt)) return X86EMUL_UNHANDLEABLE; rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size, &ctxt->exception); if (rc != X86EMUL_CONTINUE) return rc; mc->end += size; read_cached: memcpy(dest, mc->data + mc->pos, size); mc->pos += size; return X86EMUL_CONTINUE; } static int segmented_read(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, false, &linear); if (rc != X86EMUL_CONTINUE) return rc; return read_emulated(ctxt, linear, data, size); } static int segmented_write(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->write_emulated(ctxt, linear, data, size, &ctxt->exception); } static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *orig_data, const void *data, unsigned size) { int rc; ulong linear; rc = linearize(ctxt, addr, size, true, &linear); if (rc != X86EMUL_CONTINUE) return rc; return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, size, &ctxt->exception); } static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, unsigned int size, unsigned short port, void *dest) { struct read_cache *rc = &ctxt->io_read; if (rc->pos == rc->end) { /* refill pio read ahead */ unsigned int 
in_page, n; unsigned int count = ctxt->rep_prefix ? address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; in_page = (ctxt->eflags & X86_EFLAGS_DF) ? offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); if (n == 0) n = 1; rc->pos = rc->end = 0; if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n)) return 0; rc->end = n * size; } if (ctxt->rep_prefix && (ctxt->d & String) && !(ctxt->eflags & X86_EFLAGS_DF)) { ctxt->dst.data = rc->data + rc->pos; ctxt->dst.type = OP_MEM_STR; ctxt->dst.count = (rc->end - rc->pos) / size; rc->pos = rc->end; } else { memcpy(dest, rc->data + rc->pos, size); rc->pos += size; } return 1; } static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt, u16 index, struct desc_struct *desc) { struct desc_ptr dt; ulong addr; ctxt->ops->get_idt(ctxt, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, index << 3 | 0x2); addr = dt.address + index * 8; return linear_read_system(ctxt, addr, desc, sizeof(*desc)); } static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_ptr *dt) { const struct x86_emulate_ops *ops = ctxt->ops; u32 base3 = 0; if (selector & 1 << 2) { struct desc_struct desc; u16 sel; memset(dt, 0, sizeof(*dt)); if (!ops->get_segment(ctxt, &sel, &desc, &base3, VCPU_SREG_LDTR)) return; dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ dt->address = get_desc_base(&desc) | ((u64)base3 << 32); } else ops->get_gdt(ctxt, dt); } static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, ulong *desc_addr_p) { struct desc_ptr dt; u16 index = selector >> 3; ulong addr; get_descriptor_table_ptr(ctxt, selector, &dt); if (dt.size < index * 8 + 7) return emulate_gp(ctxt, selector & 0xfffc); addr = dt.address + index * 8; #ifdef CONFIG_X86_64 if (addr >> 32 != 0) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_LMA)) addr &= (u32)-1; } #endif *desc_addr_p = addr; return X86EMUL_CONTINUE; } /* allowed just for 8 bytes segments */ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, ulong *desc_addr_p) { int rc; rc = get_descriptor_ptr(ctxt, selector, desc_addr_p); if (rc != X86EMUL_CONTINUE) return rc; return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc)); } /* allowed just for 8 bytes segments */ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc) { int rc; ulong addr; rc = get_descriptor_ptr(ctxt, selector, &addr); if (rc != X86EMUL_CONTINUE) return rc; return linear_write_system(ctxt, addr, desc, sizeof(*desc)); } static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, enum x86_transfer_type transfer, struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof(seg_desc)); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. 
for * unreal mode) */ ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { /* VM86 needs a clean new segment descriptor */ set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; /* NULL selector is not valid for TR, CS and (except for long mode) SS */ if (null_selector) { if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR) goto exception; if (seg == VCPU_SREG_SS) { if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl) goto exception; /* * ctxt->ops->set_segment expects the CPL to be in * SS.DPL, so fake an expand-up 32-bit data segment. */ seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = cpl; seg_desc.d = 1; seg_desc.g = 1; } /* Skip all following checks */ goto load; } ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR : GP_VECTOR; /* can't load system descriptor into segment selector */ if (seg <= VCPU_SREG_GS && !seg_desc.s) { if (transfer == X86_TRANSFER_CALL_JMP) return X86EMUL_UNHANDLEABLE; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: /* * segment is not a writable data segment or segment * selector's RPL != CPL or DPL != CPL */ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: /* * KVM uses "none" when loading CS as part of emulating Real * Mode exceptions and IRET (handled above). In all other * cases, loading CS without a control transfer is a KVM bug. */ if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE)) goto exception; if (!(seg_desc.type & 8)) goto exception; if (transfer == X86_TRANSFER_RET) { /* RET can never return to an inner privilege level. */ if (rpl < cpl) goto exception; /* Outer-privilege level return is not implemented */ if (rpl > cpl) return X86EMUL_UNHANDLEABLE; } if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) { if (seg_desc.type & 4) { /* conforming */ if (dpl > rpl) goto exception; } else { /* nonconforming */ if (dpl != rpl) goto exception; } } else { /* X86_TRANSFER_CALL_JMP */ if (seg_desc.type & 4) { /* conforming */ if (dpl > cpl) goto exception; } else { /* nonconforming */ if (rpl > cpl || dpl != cpl) goto exception; } } /* in long-mode d/b must be clear if l is set */ if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: /* DS, ES, FS, or GS */ /* * segment is not a data or readable code segment or * ((segment is a data or nonconforming code segment) * and ((RPL > DPL) or (CPL > DPL))) */ if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl || cpl > dpl))) goto exception; break; } if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? 
SS_VECTOR : NP_VECTOR; goto exception; } if (seg_desc.s) { /* mark segment as accessed */ if (!(seg_desc.type & 1)) { seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3)); if (ret != X86EMUL_CONTINUE) return ret; if (emul_is_noncanonical_address(get_desc_base(&seg_desc) | ((u64)base3 << 32), ctxt, X86EMUL_F_DT_LOAD)) return emulate_gp(ctxt, err_code); } if (seg == VCPU_SREG_TR) { old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); if (desc) *desc = seg_desc; return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); } static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg) { u8 cpl = ctxt->ops->cpl(ctxt); /* * None of MOV, POP and LSS can load a NULL selector in CPL=3, but * they can load it at CPL<3 (Intel's manual says only LSS can, * but it's wrong). * * However, the Intel manual says that putting IST=1/DPL=3 in * an interrupt gate will result in SS=3 (the AMD manual instead * says it doesn't), so allow SS=3 in __load_segment_descriptor * and only forbid it here. */ if (seg == VCPU_SREG_SS && selector == 3 && ctxt->mode == X86EMUL_MODE_PROT64) return emulate_exception(ctxt, GP_VECTOR, 0, true); return __load_segment_descriptor(ctxt, selector, seg, cpl, X86_TRANSFER_NONE, NULL); } static void write_register_operand(struct operand *op) { return assign_register(op->addr.reg, op->val, op->bytes); } static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) { switch (op->type) { case OP_REG: write_register_operand(op); break; case OP_MEM: if (ctxt->lock_prefix) return segmented_cmpxchg(ctxt, op->addr.mem, &op->orig_val, &op->val, op->bytes); else return segmented_write(ctxt, op->addr.mem, &op->val, op->bytes); case OP_MEM_STR: return segmented_write(ctxt, op->addr.mem, op->data, op->bytes * op->count); case OP_XMM: kvm_write_sse_reg(op->addr.xmm, &op->vec_val); break; case OP_MM: kvm_write_mmx_reg(op->addr.mm, &op->mm_val); break; case OP_NONE: /* no writeback */ break; default: break; } return X86EMUL_CONTINUE; } static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len) { struct segmented_address addr; rsp_increment(ctxt, -len); addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; return segmented_write(ctxt, addr, data, len); } static int em_push(struct x86_emulate_ctxt *ctxt) { /* Disable writeback. 
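 *
 * (emulate_push() performs the stack store itself, so the generic
 * writeback pass must not also write ctxt->dst.)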
*/ ctxt->dst.type = OP_NONE; return emulate_push(ctxt, &ctxt->src.val, ctxt->op_bytes); } static int emulate_pop(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; struct segmented_address addr; addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, len); return rc; } static int em_pop(struct x86_emulate_ctxt *ctxt) { return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int emulate_popf(struct x86_emulate_ctxt *ctxt, void *dest, int len) { int rc; unsigned long val = 0; unsigned long change_mask; int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &val, len); if (rc != X86EMUL_CONTINUE) return rc; change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF | X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_ID; switch(ctxt->mode) { case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT16: if (cpl == 0) change_mask |= X86_EFLAGS_IOPL; if (cpl <= iopl) change_mask |= X86_EFLAGS_IF; break; case X86EMUL_MODE_VM86: if (iopl < 3) return emulate_gp(ctxt, 0); change_mask |= X86_EFLAGS_IF; break; default: /* real mode */ change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF); break; } *(unsigned long *)dest = (ctxt->eflags & ~change_mask) | (val & change_mask); return rc; } static int em_popf(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.addr.reg = &ctxt->eflags; ctxt->dst.bytes = ctxt->op_bytes; return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes); } static int em_enter(struct x86_emulate_ctxt *ctxt) { int rc; unsigned frame_size = ctxt->src.val; unsigned nesting_level = ctxt->src2.val & 31; ulong rbp; if (nesting_level) return X86EMUL_UNHANDLEABLE; rbp = reg_read(ctxt, VCPU_REGS_RBP); rc = emulate_push(ctxt, &rbp, stack_size(ctxt)); if (rc != X86EMUL_CONTINUE) return rc; assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP), stack_mask(ctxt)); assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RSP) - frame_size, stack_mask(ctxt)); return X86EMUL_CONTINUE; } static int em_leave(struct x86_emulate_ctxt *ctxt) { assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP), stack_mask(ctxt)); return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes); } static int em_push_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; ctxt->src.val = get_segment_selector(ctxt, seg); if (ctxt->op_bytes == 4) { rsp_increment(ctxt, -2); ctxt->op_bytes = 2; } return em_push(ctxt); } static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned long selector = 0; int rc; rc = emulate_pop(ctxt, &selector, 2); if (rc != X86EMUL_CONTINUE) return rc; if (seg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; if (ctxt->op_bytes > 2) rsp_increment(ctxt, ctxt->op_bytes - 2); rc = load_segment_descriptor(ctxt, (u16)selector, seg); return rc; } static int em_pusha(struct x86_emulate_ctxt *ctxt) { unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP); int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RAX; while (reg <= VCPU_REGS_RDI) { (reg == VCPU_REGS_RSP) ? 
(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg)); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ++reg; } return rc; } static int em_pushf(struct x86_emulate_ctxt *ctxt) { ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM; return em_push(ctxt); } static int em_popa(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; u32 val = 0; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { rsp_increment(ctxt, ctxt->op_bytes); --reg; } rc = emulate_pop(ctxt, &val, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) break; assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes); --reg; } return rc; } static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { const struct x86_emulate_ops *ops = ctxt->ops; int rc; struct desc_ptr dt; gva_t cs_addr; gva_t eip_addr; u16 cs, eip; /* TODO: Add limit checks */ ctxt->src.val = ctxt->eflags; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC); ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = ctxt->_eip; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ops->get_idt(ctxt, &dt); eip_addr = dt.address + (irq << 2); cs_addr = dt.address + (irq << 2) + 2; rc = linear_read_system(ctxt, cs_addr, &cs, 2); if (rc != X86EMUL_CONTINUE) return rc; rc = linear_read_system(ctxt, eip_addr, &eip, 2); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = eip; return rc; } int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) { int rc; invalidate_registers(ctxt); rc = __emulate_int_real(ctxt, irq); if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return rc; } static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return __emulate_int_real(ctxt, irq); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* Protected mode interrupts unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; unsigned long temp_eip = 0; unsigned long temp_eflags = 0; unsigned long cs = 0; unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF | X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF | X86_EFLAGS_AC | X86_EFLAGS_ID | X86_EFLAGS_FIXED; unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF | X86_EFLAGS_VIP; /* TODO: Add stack limit check */ rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (temp_eip & ~0xffff) return emulate_gp(ctxt, 0); rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = temp_eip; if (ctxt->op_bytes == 4) ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); else if (ctxt->op_bytes == 2) { ctxt->eflags &= ~0xffff; ctxt->eflags |= temp_eflags; } ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ ctxt->eflags |= X86_EFLAGS_FIXED; ctxt->ops->set_nmi_mask(ctxt, false); return rc; } static int em_iret(struct 
x86_emulate_ctxt *ctxt) { switch(ctxt->mode) { case X86EMUL_MODE_REAL: return emulate_iret_real(ctxt); case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT64: default: /* iret from protected mode unimplemented yet */ return X86EMUL_UNHANDLEABLE; } } static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned short sel; struct desc_struct new_desc; u8 cpl = ctxt->ops->cpl(ctxt); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, X86_TRANSFER_CALL_JMP, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, ctxt->src.val); /* Error handling is not implemented. */ if (rc != X86EMUL_CONTINUE) return X86EMUL_UNHANDLEABLE; return rc; } static int em_jmp_abs(struct x86_emulate_ctxt *ctxt) { return assign_eip_near(ctxt, ctxt->src.val); } static int em_call_near_abs(struct x86_emulate_ctxt *ctxt) { int rc; long int old_eip; old_eip = ctxt->_eip; rc = assign_eip_near(ctxt, ctxt->src.val); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = old_eip; rc = em_push(ctxt); return rc; } static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) { u64 old = ctxt->dst.orig_val64; if (ctxt->dst.bytes == 16) return X86EMUL_UNHANDLEABLE; if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); ctxt->eflags &= ~X86_EFLAGS_ZF; } else { ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | (u32) reg_read(ctxt, VCPU_REGS_RBX); ctxt->eflags |= X86_EFLAGS_ZF; } return X86EMUL_CONTINUE; } static int em_ret(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip = 0; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; return assign_eip_near(ctxt, eip); } static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip = 0; unsigned long cs = 0; int cpl = ctxt->ops->cpl(ctxt); struct desc_struct new_desc; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, X86_TRANSFER_RET, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, eip); /* Error handling is not implemented. */ if (rc != X86EMUL_CONTINUE) return X86EMUL_UNHANDLEABLE; return rc; } static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) { int rc; rc = em_ret_far(ctxt); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) { /* Save real source value, then compare EAX against destination. */ ctxt->dst.orig_val = ctxt->dst.val; ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); ctxt->src.orig_val = ctxt->src.val; ctxt->src.val = ctxt->dst.orig_val; fastop(ctxt, em_cmp); if (ctxt->eflags & X86_EFLAGS_ZF) { /* Success: write back to memory; no update of EAX */ ctxt->src.type = OP_NONE; ctxt->dst.val = ctxt->src.orig_val; } else { /* Failure: write the value we saw to EAX. 
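 *
 * (Architecturally the destination still sees a write cycle on failure,
 * just with the value that was already there; hence the dummy write-back
 * of dst.orig_val set up below.)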
*/ ctxt->src.type = OP_REG; ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); ctxt->src.val = ctxt->dst.orig_val; /* Create write-cycle to dest by writing the same value */ ctxt->dst.val = ctxt->dst.orig_val; } return X86EMUL_CONTINUE; } static int em_lseg(struct x86_emulate_ctxt *ctxt) { int seg = ctxt->src2.val; unsigned short sel; int rc; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = load_segment_descriptor(ctxt, sel, seg); if (rc != X86EMUL_CONTINUE) return rc; ctxt->dst.val = ctxt->src.val; return rc; } static int em_rsm(struct x86_emulate_ctxt *ctxt) { if (!ctxt->ops->is_smm(ctxt)) return emulate_ud(ctxt); if (ctxt->ops->leave_smm(ctxt)) ctxt->ops->triple_fault(ctxt); return emulator_recalc_and_set_mode(ctxt); } static void setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss) { cs->l = 0; /* will be adjusted later */ set_desc_base(cs, 0); /* flat segment */ cs->g = 1; /* 4kb granularity */ set_desc_limit(cs, 0xfffff); /* 4GB limit */ cs->type = 0x0b; /* Read, Execute, Accessed */ cs->s = 1; cs->dpl = 0; /* will be adjusted later */ cs->p = 1; cs->d = 1; cs->avl = 0; set_desc_base(ss, 0); /* flat segment */ set_desc_limit(ss, 0xfffff); /* 4GB limit */ ss->g = 1; /* 4kb granularity */ ss->s = 1; ss->type = 0x03; /* Read/Write, Accessed */ ss->d = 1; /* 32bit stack segment */ ss->dpl = 0; ss->p = 1; ss->l = 0; ss->avl = 0; } static int em_syscall(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); /* * Intel compatible CPUs only support SYSCALL in 64-bit mode, whereas * AMD allows SYSCALL in any flavor of protected mode. Note, it's * infeasible to emulate Intel behavior when running on AMD hardware, * as SYSCALL won't fault in the "wrong" mode, i.e. there is no #UD * for KVM to trap-and-emulate, unlike emulating AMD on Intel. */ if (ctxt->mode != X86EMUL_MODE_PROT64 && ctxt->ops->guest_cpuid_is_intel_compatible(ctxt)) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_SCE)) return emulate_ud(ctxt); setup_syscalls_segments(&cs, &ss); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); ss_sel = (u16)(msr_data + 8); if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; if (efer & EFER_LMA) { #ifdef CONFIG_X86_64 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; ops->get_msr(ctxt, ctxt->mode == X86EMUL_MODE_PROT64 ? MSR_LSTAR : MSR_CSTAR, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~msr_data; ctxt->eflags |= X86_EFLAGS_FIXED; #endif } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt->_eip = (u32)msr_data; ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); } ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; return X86EMUL_CONTINUE; } static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Intel's architecture allows SYSENTER in compatibility mode, but AMD * does not. 
Note, AMD does allow SYSENTER in legacy protected mode. */ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) && !ctxt->ops->guest_cpuid_is_intel_compatible(ctxt)) return emulate_ud(ctxt); /* sysenter/sysexit have not been tested in 64bit mode. */ if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); setup_syscalls_segments(&cs, &ss); ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK; ss_sel = cs_sel + 8; if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data : (u32)msr_data; if (efer & EFER_LMA) ctxt->mode = X86EMUL_MODE_PROT64; return X86EMUL_CONTINUE; } static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(&cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; rcx = reg_read(ctxt, VCPU_REGS_RCX); rdx = reg_read(ctxt, VCPU_REGS_RDX); cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); rcx = (u32)rcx; rdx = (u32)rdx; break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; if (emul_is_noncanonical_address(rcx, ctxt, 0) || emul_is_noncanonical_address(rdx, ctxt, 0)) return emulate_gp(ctxt, 0); break; } cs_sel |= SEGMENT_RPL_MASK; ss_sel |= SEGMENT_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; ctxt->mode = usermode; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; } static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) { int iopl; if (ctxt->mode == X86EMUL_MODE_REAL) return false; if (ctxt->mode == X86EMUL_MODE_VM86) return true; iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; return ctxt->ops->cpl(ctxt) > iopl; } #define VMWARE_PORT_VMPORT (0x5658) #define VMWARE_PORT_VMRPC (0x5659) static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct tr_seg; u32 base3; int r; u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; unsigned mask = (1 << len) - 1; unsigned long base; /* * VMware allows access to these ports even if denied * by TSS I/O permission bitmap. Mimic behavior. 
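 *
 * (VMWARE_PORT_VMPORT and VMWARE_PORT_VMRPC are the 0x5658/0x5659 backdoor
 * ports defined above; the bypass is only taken when enable_vmware_backdoor
 * is set, as checked right below.)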
*/ if (enable_vmware_backdoor && ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC))) return true; ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); if (!tr_seg.p) return false; if (desc_limit_scaled(&tr_seg) < 103) return false; base = get_desc_base(&tr_seg); #ifdef CONFIG_X86_64 base |= ((u64)base3) << 32; #endif r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true); if (r != X86EMUL_CONTINUE) return false; if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) return false; r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true); if (r != X86EMUL_CONTINUE) return false; if ((perm >> bit_idx) & mask) return false; return true; } static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt, u16 port, u16 len) { if (ctxt->perm_ok) return true; if (emulator_bad_iopl(ctxt)) if (!emulator_io_port_access_allowed(ctxt, port, len)) return false; ctxt->perm_ok = true; return true; } static void string_registers_quirk(struct x86_emulate_ctxt *ctxt) { /* * Intel CPUs mask the counter and pointers in quite strange * manner when ECX is zero due to REP-string optimizations. */ #ifdef CONFIG_X86_64 u32 eax, ebx, ecx, edx; if (ctxt->ad_bytes != 4) return; eax = ecx = 0; ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true); if (!is_guest_vendor_intel(ebx, ecx, edx)) return; *reg_write(ctxt, VCPU_REGS_RCX) = 0; switch (ctxt->b) { case 0xa4: /* movsb */ case 0xa5: /* movsd/w */ *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1; fallthrough; case 0xaa: /* stosb */ case 0xab: /* stosd/w */ *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1; } #endif } static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { tss->ip = ctxt->_eip; tss->flag = ctxt->eflags; tss->ax = reg_read(ctxt, VCPU_REGS_RAX); tss->cx = reg_read(ctxt, VCPU_REGS_RCX); tss->dx = reg_read(ctxt, VCPU_REGS_RDX); tss->bx = reg_read(ctxt, VCPU_REGS_RBX); tss->sp = reg_read(ctxt, VCPU_REGS_RSP); tss->bp = reg_read(ctxt, VCPU_REGS_RBP); tss->si = reg_read(ctxt, VCPU_REGS_RSI); tss->di = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); } static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { int ret; u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; /* * SDM says that segment selectors are loaded before segment * descriptors */ set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); cpl = tss->cs & 3; /* * Now load segment descriptors. 
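 * The privilege level used for the descriptor checks is the RPL of the incoming CS selector, computed just above.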
If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; } static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { struct tss_segment_16 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss16(ctxt, &tss_seg); ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = linear_write_system(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof(tss_seg.prev_task_link)); if (ret != X86EMUL_CONTINUE) return ret; } return load_state_from_tss16(ctxt, &tss_seg); } static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { /* CR3 and ldt selector are not saved intentionally */ tss->eip = ctxt->_eip; tss->eflags = ctxt->eflags; tss->eax = reg_read(ctxt, VCPU_REGS_RAX); tss->ecx = reg_read(ctxt, VCPU_REGS_RCX); tss->edx = reg_read(ctxt, VCPU_REGS_RDX); tss->ebx = reg_read(ctxt, VCPU_REGS_RBX); tss->esp = reg_read(ctxt, VCPU_REGS_RSP); tss->ebp = reg_read(ctxt, VCPU_REGS_RBP); tss->esi = reg_read(ctxt, VCPU_REGS_RSI); tss->edi = reg_read(ctxt, VCPU_REGS_RDI); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); } static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { int ret; u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; /* General purpose registers */ *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; /* * SDM says that segment selectors are loaded before segment * descriptors. This is important because CPL checks will * use CS.RPL. 
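 * Writing the raw selectors first also means that a fault raised while loading the descriptors below is taken with the new task's selectors already in place.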
*/ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); /* * If we're switching between Protected Mode and VM86, we need to make * sure to update the mode before loading the segment descriptors so * that the selectors are interpreted correctly. */ if (ctxt->eflags & X86_EFLAGS_VM) { ctxt->mode = X86EMUL_MODE_VM86; cpl = 3; } else { ctxt->mode = X86EMUL_MODE_PROT32; cpl = tss->cs & 3; } /* * Now load segment descriptors. If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); return ret; } static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc) { struct tss_segment_32 tss_seg; int ret; u32 new_tss_base = get_desc_base(new_desc); u32 eip_offset = offsetof(struct tss_segment_32, eip); u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector); ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; save_state_to_tss32(ctxt, &tss_seg); /* Only GP registers and segment selectors are saved */ ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset); if (ret != X86EMUL_CONTINUE) return ret; ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg)); if (ret != X86EMUL_CONTINUE) return ret; if (old_tss_sel != 0xffff) { tss_seg.prev_task_link = old_tss_sel; ret = linear_write_system(ctxt, new_tss_base, &tss_seg.prev_task_link, sizeof(tss_seg.prev_task_link)); if (ret != X86EMUL_CONTINUE) return ret; } return load_state_from_tss32(ctxt, &tss_seg); } static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct curr_tss_desc, next_tss_desc; int ret; u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); ulong old_tss_base = ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); u32 desc_limit; ulong desc_addr, dr7; /* FIXME: old_tss_base == ~0 ? 
*/ ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; /* FIXME: check that next_tss_desc is tss */ /* * Check privileges. The three cases are task switch caused by... * * 1. jmp/call/int to task gate: Check against DPL of the task gate * 2. Exception/IRQ/iret: No check is performed * 3. jmp/call to TSS/task-gate: No check is performed since the * hardware checks it before exiting. */ if (reason == TASK_SWITCH_GATE) { if (idt_index != -1) { /* Software interrupts */ struct desc_struct task_gate_desc; int dpl; ret = read_interrupt_descriptor(ctxt, idt_index, &task_gate_desc); if (ret != X86EMUL_CONTINUE) return ret; dpl = task_gate_desc.dpl; if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl) return emulate_gp(ctxt, (idt_index << 3) | 0x2); } } desc_limit = desc_limit_scaled(&next_tss_desc); if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) { return emulate_ts(ctxt, tss_selector & 0xfffc); } if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc); } if (reason == TASK_SWITCH_IRET) ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT; /* set back link to prev task only if NT bit is set in eflags note that old_tss_sel is not used after this point */ if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE) old_tss_sel = 0xffff; if (next_tss_desc.type & 8) ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc); else ret = task_switch_16(ctxt, old_tss_sel, old_tss_base, &next_tss_desc); if (ret != X86EMUL_CONTINUE) return ret; if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT; if (reason != TASK_SWITCH_IRET) { next_tss_desc.type |= (1 << 1); /* set busy flag */ write_segment_descriptor(ctxt, tss_selector, &next_tss_desc); } ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); if (has_error_code) { ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; ctxt->lock_prefix = 0; ctxt->src.val = (unsigned long) error_code; ret = em_push(ctxt); } dr7 = ops->get_dr(ctxt, 7); ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN)); return ret; } int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code) { int rc; invalidate_registers(ctxt); ctxt->_eip = ctxt->eip; ctxt->dst.type = OP_NONE; rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason, has_error_code, error_code); if (rc == X86EMUL_CONTINUE) { ctxt->eip = ctxt->_eip; writeback_registers(ctxt); } return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; } static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, struct operand *op) { int df = (ctxt->eflags & X86_EFLAGS_DF) ? 
-op->count : op->count; register_address_increment(ctxt, reg, df * op->bytes); op->addr.mem.ea = register_address(ctxt, reg); } static int em_das(struct x86_emulate_ctxt *ctxt) { u8 al, old_al; bool af, cf, old_cf; cf = ctxt->eflags & X86_EFLAGS_CF; al = ctxt->dst.val; old_al = al; old_cf = cf; cf = false; af = ctxt->eflags & X86_EFLAGS_AF; if ((al & 0x0f) > 9 || af) { al -= 6; cf = old_cf | (al >= 250); af = true; } else { af = false; } if (old_al > 0x99 || old_cf) { al -= 0x60; cf = true; } ctxt->dst.val = al; /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); if (cf) ctxt->eflags |= X86_EFLAGS_CF; if (af) ctxt->eflags |= X86_EFLAGS_AF; return X86EMUL_CONTINUE; } static int em_aam(struct x86_emulate_ctxt *ctxt) { u8 al, ah; if (ctxt->src.val == 0) return emulate_de(ctxt); al = ctxt->dst.val & 0xff; ah = al / ctxt->src.val; al %= ctxt->src.val; ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); return X86EMUL_CONTINUE; } static int em_aad(struct x86_emulate_ctxt *ctxt) { u8 al = ctxt->dst.val & 0xff; u8 ah = (ctxt->dst.val >> 8) & 0xff; al = (al + (ah * ctxt->src.val)) & 0xff; ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al; /* Set PF, ZF, SF */ ctxt->src.type = OP_IMM; ctxt->src.val = 0; ctxt->src.bytes = 1; fastop(ctxt, em_or); return X86EMUL_CONTINUE; } static int em_call(struct x86_emulate_ctxt *ctxt) { int rc; long rel = ctxt->src.val; ctxt->src.val = (unsigned long)ctxt->_eip; rc = jmp_rel(ctxt, rel); if (rc != X86EMUL_CONTINUE) return rc; return em_push(ctxt); } static int em_call_far(struct x86_emulate_ctxt *ctxt) { u16 sel, old_cs; ulong old_eip; int rc; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; int cpl = ctxt->ops->cpl(ctxt); enum x86emul_mode prev_mode = ctxt->mode; old_eip = ctxt->_eip; ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, X86_TRANSFER_CALL_JMP, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, ctxt->src.val); if (rc != X86EMUL_CONTINUE) goto fail; ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) goto fail; ctxt->src.val = old_eip; rc = em_push(ctxt); /* If we failed, we tainted the memory, but the very least we should restore cs */ if (rc != X86EMUL_CONTINUE) { pr_warn_once("faulting far call emulation tainted memory\n"); goto fail; } return rc; fail: ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); ctxt->mode = prev_mode; return rc; } static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip = 0; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_near(ctxt, eip); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } static int em_xchg(struct x86_emulate_ctxt *ctxt) { /* Write back the register source. */ ctxt->src.val = ctxt->dst.val; write_register_operand(&ctxt->src); /* Write back the memory destination with implicit LOCK prefix. 
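 * XCHG with a memory operand is architecturally atomic even without an explicit LOCK prefix, which is why lock_prefix is forced below.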
*/ ctxt->dst.val = ctxt->src.orig_val; ctxt->lock_prefix = 1; return X86EMUL_CONTINUE; } static int em_imul_3op(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = ctxt->src2.val; return fastop(ctxt, em_imul); } static int em_cwd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.type = OP_REG; ctxt->dst.bytes = ctxt->src.bytes; ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); return X86EMUL_CONTINUE; } static int em_rdpid(struct x86_emulate_ctxt *ctxt) { u64 tsc_aux = 0; if (!ctxt->ops->guest_has_rdpid(ctxt)) return emulate_ud(ctxt); ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux); ctxt->dst.val = tsc_aux; return X86EMUL_CONTINUE; } static int em_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 tsc = 0; ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc; *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32; return X86EMUL_CONTINUE; } static int em_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 pmc; if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; return X86EMUL_CONTINUE; } static int em_mov(struct x86_emulate_ctxt *ctxt) { memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); return X86EMUL_CONTINUE; } static int em_movbe(struct x86_emulate_ctxt *ctxt) { u16 tmp; if (!ctxt->ops->guest_has_movbe(ctxt)) return emulate_ud(ctxt); switch (ctxt->op_bytes) { case 2: /* * From MOVBE definition: "...When the operand size is 16 bits, * the upper word of the destination register remains unchanged * ..." * * Both casting ->valptr and ->val to u16 breaks strict aliasing * rules so we have to do the operation almost per hand. */ tmp = (u16)ctxt->src.val; ctxt->dst.val &= ~0xffffUL; ctxt->dst.val |= (unsigned long)swab16(tmp); break; case 4: ctxt->dst.val = swab32((u32)ctxt->src.val); break; case 8: ctxt->dst.val = swab64(ctxt->src.val); break; default: BUG(); } return X86EMUL_CONTINUE; } static int em_cr_write(struct x86_emulate_ctxt *ctxt) { int cr_num = ctxt->modrm_reg; int r; if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val)) return emulate_gp(ctxt, 0); /* Disable writeback. */ ctxt->dst.type = OP_NONE; if (cr_num == 0) { /* * CR0 write might have updated CR0.PE and/or CR0.PG * which can affect the cpu's execution mode. */ r = emulator_recalc_and_set_mode(ctxt); if (r != X86EMUL_CONTINUE) return r; } return X86EMUL_CONTINUE; } static int em_dr_write(struct x86_emulate_ctxt *ctxt) { unsigned long val; if (ctxt->mode == X86EMUL_MODE_PROT64) val = ctxt->src.val & ~0ULL; else val = ctxt->src.val & ~0U; /* #UD condition is already handled. */ if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0) return emulate_gp(ctxt, 0); /* Disable writeback. 
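 * MOV to a debug register has no general-purpose destination; the DR itself was already updated via set_dr() above.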
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_wrmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX); u64 msr_data; int r; msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX) | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32); r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data); if (r == X86EMUL_PROPAGATE_FAULT) return emulate_gp(ctxt, 0); return r; } static int em_rdmsr(struct x86_emulate_ctxt *ctxt) { u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX); u64 msr_data; int r; r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data); if (r == X86EMUL_PROPAGATE_FAULT) return emulate_gp(ctxt, 0); if (r == X86EMUL_CONTINUE) { *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data; *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32; } return r; } static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment) { if (segment > VCPU_SREG_GS && (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && ctxt->ops->cpl(ctxt) > 0) return emulate_gp(ctxt, 0); ctxt->dst.val = get_segment_selector(ctxt, segment); if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; return X86EMUL_CONTINUE; } static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) { if (ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); return em_store_sreg(ctxt, ctxt->modrm_reg); } static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS) return emulate_ud(ctxt); if (ctxt->modrm_reg == VCPU_SREG_SS) ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); } static int em_sldt(struct x86_emulate_ctxt *ctxt) { return em_store_sreg(ctxt, VCPU_SREG_LDTR); } static int em_lldt(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); } static int em_str(struct x86_emulate_ctxt *ctxt) { return em_store_sreg(ctxt, VCPU_SREG_TR); } static int em_ltr(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR); } static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; unsigned int max_size; rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode, &linear, X86EMUL_F_INVLPG); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_clts(struct x86_emulate_ctxt *ctxt) { ulong cr0; cr0 = ctxt->ops->get_cr(ctxt, 0); cr0 &= ~X86_CR0_TS; ctxt->ops->set_cr(ctxt, 0, cr0); return X86EMUL_CONTINUE; } static int em_hypercall(struct x86_emulate_ctxt *ctxt) { int rc = ctxt->ops->fix_hypercall(ctxt); if (rc != X86EMUL_CONTINUE) return rc; /* Let the processor re-execute the fixed hypercall */ ctxt->_eip = ctxt->eip; /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, void (*get)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *ptr)) { struct desc_ptr desc_ptr; if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && ctxt->ops->cpl(ctxt) > 0) return emulate_gp(ctxt, 0); if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; get(ctxt, &desc_ptr); if (ctxt->op_bytes == 2) { ctxt->op_bytes = 4; desc_ptr.address &= 0x00ffffff; } /* Disable writeback. 
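 * The table pointer is written out explicitly below as a 2-byte limit followed by an op_bytes-wide base, so the common writeback path is bypassed.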
*/ ctxt->dst.type = OP_NONE; return segmented_write_std(ctxt, ctxt->dst.addr.mem, &desc_ptr, 2 + ctxt->op_bytes); } static int em_sgdt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt); } static int em_sidt(struct x86_emulate_ctxt *ctxt) { return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt); } static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt) { struct desc_ptr desc_ptr; int rc; if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; rc = read_descriptor(ctxt, ctxt->src.addr.mem, &desc_ptr.size, &desc_ptr.address, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->mode == X86EMUL_MODE_PROT64 && emul_is_noncanonical_address(desc_ptr.address, ctxt, X86EMUL_F_DT_LOAD)) return emulate_gp(ctxt, 0); if (lgdt) ctxt->ops->set_gdt(ctxt, &desc_ptr); else ctxt->ops->set_idt(ctxt, &desc_ptr); /* Disable writeback. */ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_lgdt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, true); } static int em_lidt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, false); } static int em_smsw(struct x86_emulate_ctxt *ctxt) { if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && ctxt->ops->cpl(ctxt) > 0) return emulate_gp(ctxt, 0); if (ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); return X86EMUL_CONTINUE; } static int em_lmsw(struct x86_emulate_ctxt *ctxt) { ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) | (ctxt->src.val & 0x0f)); ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_loop(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; register_address_increment(ctxt, VCPU_REGS_RCX, -1); if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_jcxz(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) rc = jmp_rel(ctxt, ctxt->src.val); return rc; } static int em_in(struct x86_emulate_ctxt *ctxt) { if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val, &ctxt->dst.val)) return X86EMUL_IO_NEEDED; return X86EMUL_CONTINUE; } static int em_out(struct x86_emulate_ctxt *ctxt) { ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val, &ctxt->src.val, 1); /* Disable writeback. 
*/ ctxt->dst.type = OP_NONE; return X86EMUL_CONTINUE; } static int em_cli(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->eflags &= ~X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_sti(struct x86_emulate_ctxt *ctxt) { if (emulator_bad_iopl(ctxt)) return emulate_gp(ctxt, 0); ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; ctxt->eflags |= X86_EFLAGS_IF; return X86EMUL_CONTINUE; } static int em_cpuid(struct x86_emulate_ctxt *ctxt) { u32 eax, ebx, ecx, edx; u64 msr = 0; ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr); if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT && ctxt->ops->cpl(ctxt)) { return emulate_gp(ctxt, 0); } eax = reg_read(ctxt, VCPU_REGS_RAX); ecx = reg_read(ctxt, VCPU_REGS_RCX); ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); *reg_write(ctxt, VCPU_REGS_RAX) = eax; *reg_write(ctxt, VCPU_REGS_RBX) = ebx; *reg_write(ctxt, VCPU_REGS_RCX) = ecx; *reg_write(ctxt, VCPU_REGS_RDX) = edx; return X86EMUL_CONTINUE; } static int em_sahf(struct x86_emulate_ctxt *ctxt) { u32 flags; flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | X86_EFLAGS_SF; flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; ctxt->eflags &= ~0xffUL; ctxt->eflags |= flags | X86_EFLAGS_FIXED; return X86EMUL_CONTINUE; } static int em_lahf(struct x86_emulate_ctxt *ctxt) { *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8; return X86EMUL_CONTINUE; } static int em_bswap(struct x86_emulate_ctxt *ctxt) { switch (ctxt->op_bytes) { #ifdef CONFIG_X86_64 case 8: asm("bswap %0" : "+r"(ctxt->dst.val)); break; #endif default: asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val)); break; } return X86EMUL_CONTINUE; } static int em_clflush(struct x86_emulate_ctxt *ctxt) { /* emulating clflush regardless of cpuid */ return X86EMUL_CONTINUE; } static int em_clflushopt(struct x86_emulate_ctxt *ctxt) { /* emulating clflushopt regardless of cpuid */ return X86EMUL_CONTINUE; } static int em_movsxd(struct x86_emulate_ctxt *ctxt) { ctxt->dst.val = (s32) ctxt->src.val; return X86EMUL_CONTINUE; } static int check_fxsr(struct x86_emulate_ctxt *ctxt) { if (!ctxt->ops->guest_has_fxsr(ctxt)) return emulate_ud(ctxt); if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) return emulate_nm(ctxt); /* * Don't emulate a case that should never be hit, instead of working * around a lack of fxsave64/fxrstor64 on old compilers. */ if (ctxt->mode >= X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; return X86EMUL_CONTINUE; } /* * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save * and restore MXCSR. */ static size_t __fxstate_size(int nregs) { return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16; } static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt) { bool cr4_osfxsr; if (ctxt->mode == X86EMUL_MODE_PROT64) return __fxstate_size(16); cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR; return __fxstate_size(cr4_osfxsr ? 8 : 0); } /* * FXSAVE and FXRSTOR have 4 different formats depending on execution mode, * 1) 16 bit mode * 2) 32 bit mode * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs * preserve whole 32 bit values, though, so (1) and (2) are the same wrt. * save and restore * 3) 64-bit mode with REX.W prefix * - like (2), but XMM 8-15 are being saved and restored * 4) 64-bit mode without REX.W prefix * - like (3), but FIP and FDP are 64 bit * * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the * desired result. 
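 * Only the leading portion of the 512-byte FXSAVE image matters for emulation; __fxstate_size() below returns the offset of the XMM area plus 16 bytes per XMM register that must be preserved.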
(4) is not emulated. * * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS * and FPU DS) should match. */ static int em_fxsave(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; int rc; rc = check_fxsr(ctxt); if (rc != X86EMUL_CONTINUE) return rc; kvm_fpu_get(); rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); kvm_fpu_put(); if (rc != X86EMUL_CONTINUE) return rc; return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, fxstate_size(ctxt)); } /* * FXRSTOR might restore XMM registers not provided by the guest. Fill * in the host registers (via FXSAVE) instead, so they won't be modified. * (preemption has to stay disabled until FXRSTOR). * * Use noinline to keep the stack for other functions called by callers small. */ static noinline int fxregs_fixup(struct fxregs_state *fx_state, const size_t used_size) { struct fxregs_state fx_tmp; int rc; rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp)); memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size, __fxstate_size(16) - used_size); return rc; } static int em_fxrstor(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; int rc; size_t size; rc = check_fxsr(ctxt); if (rc != X86EMUL_CONTINUE) return rc; size = fxstate_size(ctxt); rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size); if (rc != X86EMUL_CONTINUE) return rc; kvm_fpu_get(); if (size < __fxstate_size(16)) { rc = fxregs_fixup(&fx_state, size); if (rc != X86EMUL_CONTINUE) goto out; } if (fx_state.mxcsr >> 16) { rc = emulate_gp(ctxt, 0); goto out; } if (rc == X86EMUL_CONTINUE) rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)); out: kvm_fpu_put(); return rc; } static int em_xsetbv(struct x86_emulate_ctxt *ctxt) { u32 eax, ecx, edx; if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE)) return emulate_ud(ctxt); eax = reg_read(ctxt, VCPU_REGS_RAX); edx = reg_read(ctxt, VCPU_REGS_RDX); ecx = reg_read(ctxt, VCPU_REGS_RCX); if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static bool valid_cr(int nr) { switch (nr) { case 0: case 2 ... 4: case 8: return true; default: return false; } } static int check_cr_access(struct x86_emulate_ctxt *ctxt) { if (!valid_cr(ctxt->modrm_reg)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_dr_read(struct x86_emulate_ctxt *ctxt) { int dr = ctxt->modrm_reg; u64 cr4; if (dr > 7) return emulate_ud(ctxt); cr4 = ctxt->ops->get_cr(ctxt, 4); if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) return emulate_ud(ctxt); if (ctxt->ops->get_dr(ctxt, 7) & DR7_GD) { ulong dr6; dr6 = ctxt->ops->get_dr(ctxt, 6); dr6 &= ~DR_TRAP_BITS; dr6 |= DR6_BD | DR6_ACTIVE_LOW; ctxt->ops->set_dr(ctxt, 6, dr6); return emulate_db(ctxt); } return X86EMUL_CONTINUE; } static int check_dr_write(struct x86_emulate_ctxt *ctxt) { u64 new_val = ctxt->src.val64; int dr = ctxt->modrm_reg; if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) return emulate_gp(ctxt, 0); return check_dr_read(ctxt); } static int check_svme(struct x86_emulate_ctxt *ctxt) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (!(efer & EFER_SVME)) return emulate_ud(ctxt); return X86EMUL_CONTINUE; } static int check_svme_pa(struct x86_emulate_ctxt *ctxt) { u64 rax = reg_read(ctxt, VCPU_REGS_RAX); /* Valid physical address? 
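 * VMRUN, VMLOAD and VMSAVE take the physical address of their data block in RAX; any value with bits 63:48 set is rejected below with #GP.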
*/ if (rax & 0xffff000000000000ULL) return emulate_gp(ctxt, 0); return check_svme(ctxt); } static int check_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 cr4 = ctxt->ops->get_cr(ctxt, 4); u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); /* * VMware allows access to these Pseduo-PMCs even when read via RDPMC * in Ring3 when CR4.PCE=0. */ if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx)) return X86EMUL_CONTINUE; /* * If CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE * check however is unnecessary because CPL is always 0 outside * protected mode. */ if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || ctxt->ops->check_rdpmc_early(ctxt, rcx)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_in(struct x86_emulate_ctxt *ctxt) { ctxt->dst.bytes = min(ctxt->dst.bytes, 4u); if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } static int check_perm_out(struct x86_emulate_ctxt *ctxt) { ctxt->src.bytes = min(ctxt->src.bytes, 4u); if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes)) return emulate_gp(ctxt, 0); return X86EMUL_CONTINUE; } #define D(_y) { .flags = (_y) } #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define N D(NotImpl) #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) } #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) } #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) } #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) } #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } #define II(_f, _e, _i) \ { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } #define IIP(_f, _e, _i, _p) \ { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ .intercept = x86_intercept_##_i, .check_perm = (_p) } #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } #define D2bv(_f) D((_f) | ByteOp), D(_f) #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e) #define I2bvIP(_f, _e, _i, _p) \ IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p) #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \ F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) static const struct opcode group7_rm0[] = { N, I(SrcNone | Priv | EmulateOnUD, em_hypercall), N, N, N, N, N, N, }; static const struct opcode group7_rm1[] = { DI(SrcNone | Priv, monitor), DI(SrcNone | Priv, mwait), N, N, N, N, N, N, }; static const struct opcode group7_rm2[] = { N, II(ImplicitOps | Priv, em_xsetbv, xsetbv), N, N, N, N, N, N, }; static const struct opcode group7_rm3[] = { DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall), DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), DIP(SrcNone | Prot | 
Priv, stgi, check_svme), DIP(SrcNone | Prot | Priv, clgi, check_svme), DIP(SrcNone | Prot | Priv, skinit, check_svme), DIP(SrcNone | Prot | Priv, invlpga, check_svme), }; static const struct opcode group7_rm7[] = { N, DIP(SrcNone, rdtscp, check_rdtsc), N, N, N, N, N, N, }; static const struct opcode group1[] = { F(Lock, em_add), F(Lock | PageTable, em_or), F(Lock, em_adc), F(Lock, em_sbb), F(Lock | PageTable, em_and), F(Lock, em_sub), F(Lock, em_xor), F(NoWrite, em_cmp), }; static const struct opcode group1A[] = { I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N, }; static const struct opcode group2[] = { F(DstMem | ModRM, em_rol), F(DstMem | ModRM, em_ror), F(DstMem | ModRM, em_rcl), F(DstMem | ModRM, em_rcr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_shr), F(DstMem | ModRM, em_shl), F(DstMem | ModRM, em_sar), }; static const struct opcode group3[] = { F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcImm | NoWrite, em_test), F(DstMem | SrcNone | Lock, em_not), F(DstMem | SrcNone | Lock, em_neg), F(DstXacc | Src2Mem, em_mul_ex), F(DstXacc | Src2Mem, em_imul_ex), F(DstXacc | Src2Mem, em_div_ex), F(DstXacc | Src2Mem, em_idiv_ex), }; static const struct opcode group4[] = { F(ByteOp | DstMem | SrcNone | Lock, em_inc), F(ByteOp | DstMem | SrcNone | Lock, em_dec), N, N, N, N, N, N, }; static const struct opcode group5[] = { F(DstMem | SrcNone | Lock, em_inc), F(DstMem | SrcNone | Lock, em_dec), I(SrcMem | NearBranch | IsBranch, em_call_near_abs), I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far), I(SrcMem | NearBranch | IsBranch, em_jmp_abs), I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far), I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined), }; static const struct opcode group6[] = { II(Prot | DstMem, em_sldt, sldt), II(Prot | DstMem, em_str, str), II(Prot | Priv | SrcMem16, em_lldt, lldt), II(Prot | Priv | SrcMem16, em_ltr, ltr), N, N, N, N, }; static const struct group_dual group7 = { { II(Mov | DstMem, em_sgdt, sgdt), II(Mov | DstMem, em_sidt, sidt), II(SrcMem | Priv, em_lgdt, lgdt), II(SrcMem | Priv, em_lidt, lidt), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), }, { EXT(0, group7_rm0), EXT(0, group7_rm1), EXT(0, group7_rm2), EXT(0, group7_rm3), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), } }; static const struct opcode group8[] = { N, N, N, N, F(DstMem | SrcImmByte | NoWrite, em_bt), F(DstMem | SrcImmByte | Lock | PageTable, em_bts), F(DstMem | SrcImmByte | Lock, em_btr), F(DstMem | SrcImmByte | Lock | PageTable, em_btc), }; /* * The "memory" destination is actually always a register, since we come * from the register case of group9. 
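 * Here that is 0f c7 /7; only the f3-prefixed form, RDPID, is emulated, so just the pfx_f3 slot of the table below is populated.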
*/ static const struct gprefix pfx_0f_c7_7 = { N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid), }; static const struct group_dual group9 = { { N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, }, { N, N, N, N, N, N, N, GP(0, &pfx_0f_c7_7), } }; static const struct opcode group11[] = { I(DstMem | SrcImm | Mov | PageTable, em_mov), X7(D(Undefined)), }; static const struct gprefix pfx_0f_ae_7 = { I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N, }; static const struct group_dual group15 = { { I(ModRM | Aligned16, em_fxsave), I(ModRM | Aligned16, em_fxrstor), N, N, N, N, N, GP(0, &pfx_0f_ae_7), }, { N, N, N, N, N, N, N, N, } }; static const struct gprefix pfx_0f_6f_0f_7f = { I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), }; static const struct instr_dual instr_dual_0f_2b = { I(0, em_mov), N }; static const struct gprefix pfx_0f_2b = { ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N, }; static const struct gprefix pfx_0f_10_0f_11 = { I(Unaligned, em_mov), I(Unaligned, em_mov), N, N, }; static const struct gprefix pfx_0f_28_0f_29 = { I(Aligned, em_mov), I(Aligned, em_mov), N, N, }; static const struct gprefix pfx_0f_e7 = { N, I(Sse, em_mov), N, N, }; static const struct escape escape_d9 = { { N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_db = { { N, N, N, N, N, N, N, N, }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, I(ImplicitOps, em_fninit), N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct escape escape_dd = { { N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw), }, { /* 0xC0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xC8 - 0xCF */ N, N, N, N, N, N, N, N, /* 0xD0 - 0xC7 */ N, N, N, N, N, N, N, N, /* 0xD8 - 0xDF */ N, N, N, N, N, N, N, N, /* 0xE0 - 0xE7 */ N, N, N, N, N, N, N, N, /* 0xE8 - 0xEF */ N, N, N, N, N, N, N, N, /* 0xF0 - 0xF7 */ N, N, N, N, N, N, N, N, /* 0xF8 - 0xFF */ N, N, N, N, N, N, N, N, } }; static const struct instr_dual instr_dual_0f_c3 = { I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N }; static const struct mode_dual mode_dual_63 = { N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd) }; static const struct instr_dual instr_dual_8d = { D(DstReg | SrcMem | ModRM | NoAccess), N }; static const struct opcode opcode_table[256] = { /* 0x00 - 0x07 */ F6ALU(Lock, em_add), I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg), /* 0x08 - 0x0F */ F6ALU(Lock | PageTable, em_or), I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg), N, /* 0x10 - 0x17 */ F6ALU(Lock, em_adc), I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg), /* 0x18 - 0x1F */ F6ALU(Lock, em_sbb), I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg), I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg), /* 0x20 - 0x27 */ F6ALU(Lock | PageTable, em_and), N, N, /* 0x28 - 0x2F */ F6ALU(Lock, 
em_sub), N, I(ByteOp | DstAcc | No64, em_das), /* 0x30 - 0x37 */ F6ALU(Lock, em_xor), N, N, /* 0x38 - 0x3F */ F6ALU(NoWrite, em_cmp), N, N, /* 0x40 - 0x4F */ X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)), /* 0x50 - 0x57 */ X8(I(SrcReg | Stack, em_push)), /* 0x58 - 0x5F */ X8(I(DstReg | Stack, em_pop)), /* 0x60 - 0x67 */ I(ImplicitOps | Stack | No64, em_pusha), I(ImplicitOps | Stack | No64, em_popa), N, MD(ModRM, &mode_dual_63), N, N, N, N, /* 0x68 - 0x6F */ I(SrcImm | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), I(SrcImmByte | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */ I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ /* 0x70 - 0x7F */ X16(D(SrcImmByte | NearBranch | IsBranch)), /* 0x80 - 0x87 */ G(ByteOp | DstMem | SrcImm, group1), G(DstMem | SrcImm, group1), G(ByteOp | DstMem | SrcImm | No64, group1), G(DstMem | SrcImmByte, group1), F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test), I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), /* 0x88 - 0x8F */ I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov), I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg), ID(0, &instr_dual_8d), I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm), G(0, group1A), /* 0x90 - 0x97 */ DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), /* 0x98 - 0x9F */ D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), I(SrcImmFAddr | No64 | IsBranch, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), II(ImplicitOps | Stack, em_popf, popf), I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov), F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r), /* 0xA8 - 0xAF */ F2bv(DstAcc | SrcImm | NoWrite, em_test), I2bv(SrcAcc | DstDI | Mov | String, em_mov), I2bv(SrcSI | DstAcc | Mov | String, em_mov), F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r), /* 0xB0 - 0xB7 */ X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), /* 0xB8 - 0xBF */ X8(I(DstReg | SrcImm64 | Mov, em_mov)), /* 0xC0 - 0xC7 */ G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2), I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm), I(ImplicitOps | NearBranch | IsBranch, em_ret), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg), I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg), G(ByteOp, group11), G(0, group11), /* 0xC8 - 0xCF */ I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter), I(Stack | IsBranch, em_leave), I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm), I(ImplicitOps | IsBranch, em_ret_far), D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn), D(ImplicitOps | No64 | IsBranch), II(ImplicitOps | IsBranch, em_iret, iret), /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), I(DstAcc | SrcImmUByte | No64, em_aam), I(DstAcc | SrcImmUByte | No64, em_aad), F(DstAcc | ByteOp | No64, em_salc), I(DstAcc | SrcXLat | ByteOp, em_mov), /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)), I(SrcImmByte | NearBranch | IsBranch, em_jcxz), I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstImmUByte, 
em_out, out, check_perm_out), /* 0xE8 - 0xEF */ I(SrcImm | NearBranch | IsBranch, em_call), D(SrcImm | ImplicitOps | NearBranch | IsBranch), I(SrcImmFAddr | No64 | IsBranch, em_jmp_far), D(SrcImmByte | ImplicitOps | NearBranch | IsBranch), I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in), I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out), /* 0xF0 - 0xF7 */ N, DI(ImplicitOps, icebp), N, N, DI(ImplicitOps | Priv, hlt), D(ImplicitOps), G(ByteOp, group3), G(0, group3), /* 0xF8 - 0xFF */ D(ImplicitOps), D(ImplicitOps), I(ImplicitOps, em_cli), I(ImplicitOps, em_sti), D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), }; static const struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, /* 0x10 - 0x1F */ GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11), GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11), N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */ D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */ /* 0x20 - 0x2F */ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access), DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write, check_cr_access), IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write, check_dr_write), N, N, N, N, GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29), GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29), N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b), N, N, N, N, /* 0x30 - 0x3F */ II(ImplicitOps | Priv, em_wrmsr, wrmsr), IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), II(ImplicitOps | Priv, em_rdmsr, rdmsr), IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter), I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ X16(D(DstReg | SrcMem | ModRM)), /* 0x50 - 0x5F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0x60 - 0x6F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x70 - 0x7F */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), /* 0x80 - 0x8F */ X16(D(SrcImm | NearBranch | IsBranch)), /* 0x90 - 0x9F */ X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)), /* 0xA0 - 0xA7 */ I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg), II(ImplicitOps, em_cpuid, cpuid), F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld), F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N, /* 0xA8 - 0xAF */ I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg), II(EmulateOnUD | ImplicitOps, em_rsm, rsm), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul), /* 0xB0 - 0xB7 */ I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, 
em_cmpxchg), I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr), I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg), I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xB8 - 0xBF */ N, N, G(BitOp, group8), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), I(DstReg | SrcMem | ModRM, em_bsf_c), I(DstReg | SrcMem | ModRM, em_bsr_c), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xC7 */ F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), N, ID(0, &instr_dual_0f_c3), N, N, N, GD(0, &group9), /* 0xC8 - 0xCF */ X8(I(DstReg, em_bswap)), /* 0xD0 - 0xDF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, /* 0xE0 - 0xEF */ N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7), N, N, N, N, N, N, N, N, /* 0xF0 - 0xFF */ N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N }; static const struct instr_dual instr_dual_0f_38_f0 = { I(DstReg | SrcMem | Mov, em_movbe), N }; static const struct instr_dual instr_dual_0f_38_f1 = { I(DstMem | SrcReg | Mov, em_movbe), N }; static const struct gprefix three_byte_0f_38_f0 = { ID(0, &instr_dual_0f_38_f0), ID(0, &instr_dual_0f_38_f0), N, N }; static const struct gprefix three_byte_0f_38_f1 = { ID(0, &instr_dual_0f_38_f1), ID(0, &instr_dual_0f_38_f1), N, N }; /* * Insns below are selected by the prefix which indexed by the third opcode * byte. */ static const struct opcode opcode_map_0f_38[256] = { /* 0x00 - 0x7f */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0x80 - 0xef */ X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), /* 0xf0 - 0xf1 */ GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0), GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1), /* 0xf2 - 0xff */ N, N, X4(N), X8(N) }; #undef D #undef N #undef G #undef GD #undef I #undef GP #undef EXT #undef MD #undef ID #undef D2bv #undef D2bvIP #undef I2bv #undef I2bvIP #undef I6ALU static unsigned imm_size(struct x86_emulate_ctxt *ctxt) { unsigned size; size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; if (size == 8) size = 4; return size; } static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned size, bool sign_extension) { int rc = X86EMUL_CONTINUE; op->type = OP_IMM; op->bytes = size; op->addr.mem.ea = ctxt->_eip; /* NB. Immediates are sign-extended as necessary. */ switch (op->bytes) { case 1: op->val = insn_fetch(s8, ctxt); break; case 2: op->val = insn_fetch(s16, ctxt); break; case 4: op->val = insn_fetch(s32, ctxt); break; case 8: op->val = insn_fetch(s64, ctxt); break; } if (!sign_extension) { switch (op->bytes) { case 1: op->val &= 0xff; break; case 2: op->val &= 0xffff; break; case 4: op->val &= 0xffffffff; break; } } done: return rc; } static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned d) { int rc = X86EMUL_CONTINUE; switch (d) { case OpReg: decode_register_operand(ctxt, op); break; case OpImmUByte: rc = decode_imm(ctxt, op, 1, false); break; case OpMem: ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; mem_common: *op = ctxt->memop; ctxt->memopp = op; if (ctxt->d & BitOp) fetch_bit_operand(ctxt); op->orig_val = op->val; break; case OpMem64: ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; goto mem_common; case OpAcc: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 
1 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccLo: op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); fetch_register_operand(op); op->orig_val = op->val; break; case OpAccHi: if (ctxt->d & ByteOp) { op->type = OP_NONE; break; } op->type = OP_REG; op->bytes = ctxt->op_bytes; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); op->orig_val = op->val; break; case OpDI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RDI); op->addr.mem.seg = VCPU_SREG_ES; op->val = 0; op->count = 1; break; case OpDX: op->type = OP_REG; op->bytes = 2; op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX); fetch_register_operand(op); break; case OpCL: op->type = OP_IMM; op->bytes = 1; op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; break; case OpImmByte: rc = decode_imm(ctxt, op, 1, true); break; case OpOne: op->type = OP_IMM; op->bytes = 1; op->val = 1; break; case OpImm: rc = decode_imm(ctxt, op, imm_size(ctxt), true); break; case OpImm64: rc = decode_imm(ctxt, op, ctxt->op_bytes, true); break; case OpMem8: ctxt->memop.bytes = 1; if (ctxt->memop.type == OP_REG) { ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, true); fetch_register_operand(&ctxt->memop); } goto mem_common; case OpMem16: ctxt->memop.bytes = 2; goto mem_common; case OpMem32: ctxt->memop.bytes = 4; goto mem_common; case OpImmU16: rc = decode_imm(ctxt, op, 2, false); break; case OpImmU: rc = decode_imm(ctxt, op, imm_size(ctxt), false); break; case OpSI: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = register_address(ctxt, VCPU_REGS_RSI); op->addr.mem.seg = ctxt->seg_override; op->val = 0; op->count = 1; break; case OpXLat: op->type = OP_MEM; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.mem.ea = address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RBX) + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); op->addr.mem.seg = ctxt->seg_override; op->val = 0; break; case OpImmFAddr: op->type = OP_IMM; op->addr.mem.ea = ctxt->_eip; op->bytes = ctxt->op_bytes + 2; insn_fetch_arr(op->valptr, op->bytes, ctxt); break; case OpMemFAddr: ctxt->memop.bytes = ctxt->op_bytes + 2; goto mem_common; case OpES: op->type = OP_IMM; op->val = VCPU_SREG_ES; break; case OpCS: op->type = OP_IMM; op->val = VCPU_SREG_CS; break; case OpSS: op->type = OP_IMM; op->val = VCPU_SREG_SS; break; case OpDS: op->type = OP_IMM; op->val = VCPU_SREG_DS; break; case OpFS: op->type = OP_IMM; op->val = VCPU_SREG_FS; break; case OpGS: op->type = OP_IMM; op->val = VCPU_SREG_GS; break; case OpImplicit: /* Special instructions do their own operand decoding. */ default: op->type = OP_NONE; /* Disable writeback. 
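 * OP_NONE operands are ignored by the common writeback code, so unhandled operand kinds have no effect.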
*/ break; } done: return rc; } int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; u16 dummy; struct desc_struct desc; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; ctxt->intercept = x86_intercept_none; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) goto done; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: def_op_bytes = def_ad_bytes = 2; ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS); if (desc.d) def_op_bytes = def_ad_bytes = 4; break; case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ has_seg_override = true; ctxt->seg_override = VCPU_SREG_ES; break; case 0x2e: /* CS override */ has_seg_override = true; ctxt->seg_override = VCPU_SREG_CS; break; case 0x36: /* SS override */ has_seg_override = true; ctxt->seg_override = VCPU_SREG_SS; break; case 0x3e: /* DS override */ has_seg_override = true; ctxt->seg_override = VCPU_SREG_DS; break; case 0x64: /* FS override */ has_seg_override = true; ctxt->seg_override = VCPU_SREG_FS; break; case 0x65: /* GS override */ has_seg_override = true; ctxt->seg_override = VCPU_SREG_GS; break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } /* Any legacy prefix after a REX prefix nullifies its effect. */ ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ opcode = opcode_table[ctxt->b]; /* Two-byte opcode? 
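 * A leading 0x0f escape selects the two-byte table, and 0x0f 0x38 selects the three-byte 0f 38 map handled just below.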
*/ if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; /* 0F_38 opcode map */ if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); /* vex-prefix instructions are not implemented */ if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) { size_t size = ARRAY_SIZE(opcode.u.esc->high); u32 index = array_index_nospec( ctxt->modrm - 0xc0, size); opcode = opcode.u.esc->high[index]; } else { opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; } break; case InstrDual: if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.idual->mod3; else opcode = opcode.u.idual->mod012; break; case ModeDual: if (ctxt->mode == X86EMUL_MODE_PROT64) opcode = opcode.u.mdual->mode64; else opcode = opcode.u.mdual->mode32; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } ctxt->is_branch = opcode.flags & IsBranch; /* Unrecognised? */ if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(emulation_type & EMULTYPE_TRAP_UD) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch| No16))) { /* * These are copied unconditionally here, and checked unconditionally * in x86_emulate_insn. */ ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64) { if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) ctxt->op_bytes = 8; else if (ctxt->d & NearBranch) ctxt->op_bytes = 8; } if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if ((ctxt->d & No16) && ctxt->op_bytes == 2) ctxt->op_bytes = 4; if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } /* ModRM and SIB bytes. */ if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; /* * Decode and fetch the source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* * Decode and fetch the second source operand: register, memory * or immediate. 
*/ rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* Decode and fetch the destination operand: register or memory. */ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); if (ctxt->rip_relative && likely(ctxt->memopp)) ctxt->memopp->addr.mem.ea = address_mask(ctxt, ctxt->memopp->addr.mem.ea + ctxt->_eip); done: if (rc == X86EMUL_PROPAGATE_FAULT) ctxt->have_exception = true; return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; } bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt) { return ctxt->d & PageTable; } static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) { /* The second termination condition only applies for REPE * and REPNE. Test if the repeat string operation prefix is * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the * corresponding termination condition according to: * - if REPE/REPZ and ZF = 0 then done * - if REPNE/REPNZ and ZF = 1 then done */ if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || (ctxt->b == 0xae) || (ctxt->b == 0xaf)) && (((ctxt->rep_prefix == REPE_PREFIX) && ((ctxt->eflags & X86_EFLAGS_ZF) == 0)) || ((ctxt->rep_prefix == REPNE_PREFIX) && ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF)))) return true; return false; } static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt) { int rc; kvm_fpu_get(); rc = asm_safe("fwait"); kvm_fpu_put(); if (unlikely(rc != X86EMUL_CONTINUE)) return emulate_exception(ctxt, MF_VECTOR, 0, false); return X86EMUL_CONTINUE; } static void fetch_possible_mmx_operand(struct operand *op) { if (op->type == OP_MM) kvm_read_mmx_reg(op->addr.mm, &op->mm_val); } static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop) { ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; if (!(ctxt->d & ByteOp)) fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE; asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n" : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags), [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT : "c"(ctxt->src2.val)); ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); if (!fop) /* exception is returned in fop variable */ return emulate_de(ctxt); return X86EMUL_CONTINUE; } void init_decode_cache(struct x86_emulate_ctxt *ctxt) { /* Clear fields that are set conditionally but read without a guard. 
*/ ctxt->rip_relative = false; ctxt->rex_prefix = 0; ctxt->lock_prefix = 0; ctxt->rep_prefix = 0; ctxt->regs_valid = 0; ctxt->regs_dirty = 0; ctxt->io_read.pos = 0; ctxt->io_read.end = 0; ctxt->mem_read.end = 0; } int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt); ctxt->mem_read.pos = 0; /* LOCK prefix is allowed only with some instructions */ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || (ctxt->d & Undefined)) { rc = emulate_ud(ctxt); goto done; } if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } if (ctxt->d & Mmx) { rc = flush_pending_x87_faults(ctxt); if (rc != X86EMUL_CONTINUE) goto done; /* * Now that we know the fpu is exception safe, we can fetch * operands from it. */ fetch_possible_mmx_operand(&ctxt->src); fetch_possible_mmx_operand(&ctxt->src2); if (!(ctxt->d & Mov)) fetch_possible_mmx_operand(&ctxt->dst); } if (unlikely(is_guest_mode) && ctxt->intercept) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } /* Instruction can only be executed in protected mode */ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { rc = emulate_ud(ctxt); goto done; } /* Privileged instruction can be executed only in CPL=0 */ if ((ctxt->d & Priv) && ops->cpl(ctxt)) { if (ctxt->d & PrivUD) rc = emulate_ud(ctxt); else rc = emulate_gp(ctxt, 0); goto done; } /* Do instruction specific permission checks */ if (ctxt->d & CheckPerm) { rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) { /* All REP prefixes have the same first termination condition */ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { string_registers_quirk(ctxt); ctxt->eip = ctxt->_eip; ctxt->eflags &= ~X86_EFLAGS_RF; goto done; } } } if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; ctxt->src.orig_val64 = ctxt->src.val64; } if (ctxt->src2.type == OP_MEM) { rc = segmented_read(ctxt, ctxt->src2.addr.mem, &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ rc = segmented_read(ctxt, ctxt->dst.addr.mem, &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) { if (!(ctxt->d & NoWrite) && rc == X86EMUL_PROPAGATE_FAULT && ctxt->exception.vector == PF_VECTOR) ctxt->exception.error_code |= PFERR_WRITE_MASK; goto done; } } /* Copy full 64-bit value for CMPXCHG8B. 
*/ ctxt->dst.orig_val64 = ctxt->dst.val64; special_insn: if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) ctxt->eflags |= X86_EFLAGS_RF; else ctxt->eflags &= ~X86_EFLAGS_RF; if (ctxt->execute) { if (ctxt->d & Fastop) rc = fastop(ctxt, ctxt->fop); else rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } if (ctxt->opcode_len == 2) goto twobyte_insn; else if (ctxt->opcode_len == 3) goto threebyte_insn; switch (ctxt->b) { case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; break; case 0x90 ... 0x97: /* nop / xchg reg, rax */ if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt->dst.type = OP_NONE; else rc = em_xchg(ctxt); break; case 0x98: /* cbw/cwde/cdqe */ switch (ctxt->op_bytes) { case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; case 0xcc: /* int3 */ rc = emulate_int(ctxt, 3); break; case 0xcd: /* int n */ rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ if (ctxt->eflags & X86_EFLAGS_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ ctxt->ops->halt(ctxt); break; case 0xf5: /* cmc */ /* complement carry flag from eflags reg */ ctxt->eflags ^= X86_EFLAGS_CF; break; case 0xf8: /* clc */ ctxt->eflags &= ~X86_EFLAGS_CF; break; case 0xf9: /* stc */ ctxt->eflags |= X86_EFLAGS_CF; break; case 0xfc: /* cld */ ctxt->eflags &= ~X86_EFLAGS_DF; break; case 0xfd: /* std */ ctxt->eflags |= X86_EFLAGS_DF; break; default: goto cannot_emulate; } if (rc != X86EMUL_CONTINUE) goto done; writeback: if (ctxt->d & SrcWrite) { BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); rc = writeback(ctxt, &ctxt->src); if (rc != X86EMUL_CONTINUE) goto done; } if (!(ctxt->d & NoWrite)) { rc = writeback(ctxt, &ctxt->dst); if (rc != X86EMUL_CONTINUE) goto done; } /* * restore dst type in case the decoding will be reused * (happens for string instruction ) */ ctxt->dst.type = saved_dst_type; if ((ctxt->d & SrcMask) == SrcSI) string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); if (ctxt->rep_prefix && (ctxt->d & String)) { unsigned int count; struct read_cache *r = &ctxt->io_read; if ((ctxt->d & SrcMask) == SrcSI) count = ctxt->src.count; else count = ctxt->dst.count; register_address_increment(ctxt, VCPU_REGS_RCX, -count); if (!string_insn_completed(ctxt)) { /* * Re-enter guest when pio read ahead buffer is empty * or, if it is not used, after each 1024 iteration. */ if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && (r->end == 0 || r->end != r->pos)) { /* * Reset read cache. Usually happens before * decode, but since instruction is restarted * we have to do it here. 
*/ ctxt->mem_read.end = 0; writeback_registers(ctxt); return EMULATION_RESTART; } goto done; /* skip rip writeback */ } ctxt->eflags &= ~X86_EFLAGS_RF; } ctxt->eip = ctxt->_eip; if (ctxt->mode != X86EMUL_MODE_PROT64) ctxt->eip = (u32)ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt)) return EMULATION_FAILED; ctxt->have_exception = true; } if (rc == X86EMUL_INTERCEPTED) return EMULATION_INTERCEPTED; if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: switch (ctxt->b) { case 0x09: /* wbinvd */ (ctxt->ops->wbinvd)(ctxt); break; case 0x08: /* invd */ case 0x0d: /* GrpP (prefetch) */ case 0x18: /* Grp16 (prefetch/nop) */ case 0x1f: /* nop */ break; case 0x20: /* mov cr, reg */ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ ctxt->dst.val = ops->get_dr(ctxt, ctxt->modrm_reg); break; case 0x40 ... 0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; else if (ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: /* movsx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; default: goto cannot_emulate; } threebyte_insn: if (rc != X86EMUL_CONTINUE) goto done; goto writeback; cannot_emulate: return EMULATION_FAILED; } void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt) { invalidate_registers(ctxt); } void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt) { writeback_registers(ctxt); } bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt) { if (ctxt->rep_prefix && (ctxt->d & String)) return false; if (ctxt->d & TwoMemOp) return false; return true; }
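/*
 * Illustrative sketch (not part of the covered source). The legacy-prefix
 * loop in x86_decode_insn() above toggles the effective operand and address
 * size by XOR-ing the mode default with a constant: def_op_bytes ^ 6 flips
 * 2 <-> 4, and def_ad_bytes ^ 12 flips 4 <-> 8. The standalone demo below
 * only illustrates that arithmetic; the demo_* names are invented for this
 * example and are not part of the KVM emulator.
 */
#include <assert.h>

static unsigned int demo_opsize_with_66(unsigned int def_op_bytes)
{
	/* 0x66 prefix: switch between 2- and 4-byte operands */
	return def_op_bytes ^ 6;
}

static unsigned int demo_adsize_with_67(unsigned int def_ad_bytes, int long_mode)
{
	/* 0x67 prefix: 4 <-> 8 bytes in long mode, 2 <-> 4 bytes otherwise */
	return long_mode ? def_ad_bytes ^ 12 : def_ad_bytes ^ 6;
}

int main(void)
{
	assert(demo_opsize_with_66(2) == 4);
	assert(demo_opsize_with_66(4) == 2);
	assert(demo_adsize_with_67(4, 1) == 8);
	assert(demo_adsize_with_67(8, 1) == 4);
	assert(demo_adsize_with_67(2, 0) == 4);
	return 0;
}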
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2001-2004 by David Brownell */ /* this file is part of ehci-hcd.c */ /*-------------------------------------------------------------------------*/ /* * EHCI Root Hub ... the nonsharable stuff * * Registers don't need cpu_to_le32, that happens transparently */ /*-------------------------------------------------------------------------*/ #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) #ifdef CONFIG_PM static void unlink_empty_async_suspended(struct ehci_hcd *ehci); static int persist_enabled_on_companion(struct usb_device *udev, void *unused) { return !udev->maxchild && udev->persist_enabled && udev->bus->root_hub->speed < USB_SPEED_HIGH; } /* After a power loss, ports that were owned by the companion must be * reset so that the companion can still own them. */ static void ehci_handover_companion_ports(struct ehci_hcd *ehci) { u32 __iomem *reg; u32 status; int port; __le32 buf; struct usb_hcd *hcd = ehci_to_hcd(ehci); if (!ehci->owned_ports) return; /* * USB 1.1 devices are mostly HIDs, which don't need to persist across * suspends. If we ensure that none of our companion's devices have * persist_enabled (by looking through all USB 1.1 buses in the system), * we can skip this and avoid slowing resume down. Devices without * persist will just get reenumerated shortly after resume anyway.
*/ if (!usb_for_each_dev(NULL, persist_enabled_on_companion)) return; /* Make sure the ports are powered */ port = HCS_N_PORTS(ehci->hcs_params); while (port--) { if (test_bit(port, &ehci->owned_ports)) { reg = &ehci->regs->port_status[port]; status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; if (!(status & PORT_POWER)) ehci_port_power(ehci, port, true); } } /* Give the connections some time to appear */ msleep(20); spin_lock_irq(&ehci->lock); port = HCS_N_PORTS(ehci->hcs_params); while (port--) { if (test_bit(port, &ehci->owned_ports)) { reg = &ehci->regs->port_status[port]; status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; /* Port already owned by companion? */ if (status & PORT_OWNER) clear_bit(port, &ehci->owned_ports); else if (test_bit(port, &ehci->companion_ports)) ehci_writel(ehci, status & ~PORT_PE, reg); else { spin_unlock_irq(&ehci->lock); ehci_hub_control(hcd, SetPortFeature, USB_PORT_FEAT_RESET, port + 1, NULL, 0); spin_lock_irq(&ehci->lock); } } } spin_unlock_irq(&ehci->lock); if (!ehci->owned_ports) return; msleep(90); /* Wait for resets to complete */ spin_lock_irq(&ehci->lock); port = HCS_N_PORTS(ehci->hcs_params); while (port--) { if (test_bit(port, &ehci->owned_ports)) { spin_unlock_irq(&ehci->lock); ehci_hub_control(hcd, GetPortStatus, 0, port + 1, (char *) &buf, sizeof(buf)); spin_lock_irq(&ehci->lock); /* The companion should now own the port, * but if something went wrong the port must not * remain enabled. */ reg = &ehci->regs->port_status[port]; status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; if (status & PORT_OWNER) ehci_writel(ehci, status | PORT_CSC, reg); else { ehci_dbg(ehci, "failed handover port %d: %x\n", port + 1, status); ehci_writel(ehci, status & ~PORT_PE, reg); } } } ehci->owned_ports = 0; spin_unlock_irq(&ehci->lock); } static int ehci_port_change(struct ehci_hcd *ehci) { int i = HCS_N_PORTS(ehci->hcs_params); /* First check if the controller indicates a change event */ if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD) return 1; /* * Not all controllers appear to update this while going from D3 to D0, * so check the individual port status registers as well */ while (i--) if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC) return 1; return 0; } void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, bool suspending, bool do_wakeup) { int port; u32 temp; /* If remote wakeup is enabled for the root hub but disabled * for the controller, we must adjust all the port wakeup flags * when the controller is suspended or resumed. In all other * cases they don't need to be changed. */ if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup) return; spin_lock_irq(&ehci->lock); /* clear phy low-power mode before changing wakeup flags */ if (ehci->has_tdi_phy_lpm) { port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; temp = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); } spin_unlock_irq(&ehci->lock); msleep(5); spin_lock_irq(&ehci->lock); } port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *reg = &ehci->regs->port_status[port]; u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; u32 t2 = t1 & ~PORT_WAKE_BITS; /* If we are suspending the controller, clear the flags. * If we are resuming the controller, set the wakeup flags. 
*/ if (!suspending) { if (t1 & PORT_CONNECT) t2 |= PORT_WKOC_E | PORT_WKDISC_E; else t2 |= PORT_WKOC_E | PORT_WKCONN_E; } ehci_writel(ehci, t2, reg); } /* enter phy low-power mode again */ if (ehci->has_tdi_phy_lpm) { port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; temp = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg); } } /* Does the root hub have a port wakeup pending? */ if (!suspending && ehci_port_change(ehci)) usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); spin_unlock_irq(&ehci->lock); } EXPORT_SYMBOL_GPL(ehci_adjust_port_wakeup_flags); static int ehci_bus_suspend (struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); int port; int mask; int changed; bool fs_idle_delay; ehci_dbg(ehci, "suspend root hub\n"); if (time_before (jiffies, ehci->next_statechange)) msleep(5); /* stop the schedules */ ehci_quiesce(ehci); spin_lock_irq (&ehci->lock); if (ehci->rh_state < EHCI_RH_RUNNING) goto done; /* Once the controller is stopped, port resumes that are already * in progress won't complete. Hence if remote wakeup is enabled * for the root hub and any ports are in the middle of a resume or * remote wakeup, we must fail the suspend. */ if (hcd->self.root_hub->do_remote_wakeup) { if (ehci->resuming_ports) { spin_unlock_irq(&ehci->lock); ehci_dbg(ehci, "suspend failed because a port is resuming\n"); return -EBUSY; } } /* Unlike other USB host controller types, EHCI doesn't have * any notion of "global" or bus-wide suspend. The driver has * to manually suspend all the active unsuspended ports, and * then manually resume them in the bus_resume() routine. */ ehci->bus_suspended = 0; ehci->owned_ports = 0; changed = 0; fs_idle_delay = false; port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *reg = &ehci->regs->port_status [port]; u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; u32 t2 = t1 & ~PORT_WAKE_BITS; /* keep track of which ports we suspend */ if (t1 & PORT_OWNER) set_bit(port, &ehci->owned_ports); else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) { t2 |= PORT_SUSPEND; set_bit(port, &ehci->bus_suspended); } /* enable remote wakeup on all ports, if told to do so */ if (hcd->self.root_hub->do_remote_wakeup) { /* only enable appropriate wake bits, otherwise the * hardware can not go phy low power mode. If a race * condition happens here(connection change during bits * set), the port change detection will finally fix it. */ if (t1 & PORT_CONNECT) t2 |= PORT_WKOC_E | PORT_WKDISC_E; else t2 |= PORT_WKOC_E | PORT_WKCONN_E; } if (t1 != t2) { /* * On some controllers, Wake-On-Disconnect will * generate false wakeup signals until the bus * switches over to full-speed idle. For their * sake, add a delay if we need one. */ if ((t2 & PORT_WKDISC_E) && ehci_port_speed(ehci, t2) == USB_PORT_STAT_HIGH_SPEED) fs_idle_delay = true; ehci_writel(ehci, t2, reg); changed = 1; } } spin_unlock_irq(&ehci->lock); if (changed && ehci_has_fsl_susp_errata(ehci)) /* * Wait for at least 10 millisecondes to ensure the controller * enter the suspend status before initiating a port resume * using the Force Port Resume bit (Not-EHCI compatible). */ usleep_range(10000, 20000); if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) { /* * Wait for HCD to enter low-power mode or for the bus * to switch to full-speed idle. 
*/ usleep_range(5000, 5500); } if (changed && ehci->has_tdi_phy_lpm) { spin_lock_irq(&ehci->lock); port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; u32 t3; t3 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); t3 = ehci_readl(ehci, hostpc_reg); ehci_dbg(ehci, "Port %d phy low-power mode %s\n", port, (t3 & HOSTPC_PHCD) ? "succeeded" : "failed"); } spin_unlock_irq(&ehci->lock); } /* Apparently some devices need a >= 1-uframe delay here */ if (ehci->bus_suspended) udelay(150); /* turn off now-idle HC */ ehci_halt (ehci); spin_lock_irq(&ehci->lock); if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD)) ehci_handle_controller_death(ehci); if (ehci->rh_state != EHCI_RH_RUNNING) goto done; ehci->rh_state = EHCI_RH_SUSPENDED; unlink_empty_async_suspended(ehci); /* Some Synopsys controllers mistakenly leave IAA turned on */ ehci_writel(ehci, STS_IAA, &ehci->regs->status); /* Any IAA cycle that started before the suspend is now invalid */ end_iaa_cycle(ehci); ehci_handle_start_intr_unlinks(ehci); ehci_handle_intr_unlinks(ehci); end_free_itds(ehci); /* allow remote wakeup */ mask = INTR_MASK; if (!hcd->self.root_hub->do_remote_wakeup) mask &= ~STS_PCD; ehci_writel(ehci, mask, &ehci->regs->intr_enable); ehci_readl(ehci, &ehci->regs->intr_enable); done: ehci->next_statechange = jiffies + msecs_to_jiffies(10); ehci->enabled_hrtimer_events = 0; ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT; spin_unlock_irq (&ehci->lock); hrtimer_cancel(&ehci->hrtimer); return 0; } /* caller has locked the root hub, and should reset/reinit on error */ static int ehci_bus_resume (struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 temp; u32 power_okay; int i; unsigned long resume_needed = 0; if (time_before (jiffies, ehci->next_statechange)) msleep(5); spin_lock_irq (&ehci->lock); if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown) goto shutdown; if (unlikely(ehci->debug)) { if (!dbgp_reset_prep(hcd)) ehci->debug = NULL; else dbgp_external_startup(hcd); } /* Ideally and we've got a real resume here, and no port's power * was lost. (For PCI, that means Vaux was maintained.) But we * could instead be restoring a swsusp snapshot -- so that BIOS was * the last user of the controller, not reset/pm hardware keeping * state we gave to it. */ power_okay = ehci_readl(ehci, &ehci->regs->intr_enable); ehci_dbg(ehci, "resume root hub%s\n", power_okay ? "" : " after power loss"); /* at least some APM implementations will try to deliver * IRQs right away, so delay them until we're ready. */ ehci_writel(ehci, 0, &ehci->regs->intr_enable); /* re-init operational registers */ ehci_writel(ehci, 0, &ehci->regs->segment); ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list); ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next); /* restore CMD_RUN, framelist size, and irq threshold */ ehci->command |= CMD_RUN; ehci_writel(ehci, ehci->command, &ehci->regs->command); ehci->rh_state = EHCI_RH_RUNNING; /* * According to Bugzilla #8190, the port status for some controllers * will be wrong without a delay. At their wrong status, the port * is enabled, but not suspended neither resumed. 
*/ i = HCS_N_PORTS(ehci->hcs_params); while (i--) { temp = ehci_readl(ehci, &ehci->regs->port_status[i]); if ((temp & PORT_PE) && !(temp & (PORT_SUSPEND | PORT_RESUME))) { ehci_dbg(ehci, "Port status(0x%x) is wrong\n", temp); spin_unlock_irq(&ehci->lock); msleep(8); spin_lock_irq(&ehci->lock); break; } } if (ehci->shutdown) goto shutdown; /* clear phy low-power mode before resume */ if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) { i = HCS_N_PORTS(ehci->hcs_params); while (i--) { if (test_bit(i, &ehci->bus_suspended)) { u32 __iomem *hostpc_reg = &ehci->regs->hostpc[i]; temp = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); } } spin_unlock_irq(&ehci->lock); msleep(5); spin_lock_irq(&ehci->lock); if (ehci->shutdown) goto shutdown; } /* manually resume the ports we suspended during bus_suspend() */ i = HCS_N_PORTS (ehci->hcs_params); while (i--) { temp = ehci_readl(ehci, &ehci->regs->port_status [i]); temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); if (test_bit(i, &ehci->bus_suspended) && (temp & PORT_SUSPEND)) { temp |= PORT_RESUME; set_bit(i, &resume_needed); } ehci_writel(ehci, temp, &ehci->regs->port_status [i]); } /* * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume * port */ if (resume_needed) { spin_unlock_irq(&ehci->lock); msleep(USB_RESUME_TIMEOUT); spin_lock_irq(&ehci->lock); if (ehci->shutdown) goto shutdown; } i = HCS_N_PORTS (ehci->hcs_params); while (i--) { temp = ehci_readl(ehci, &ehci->regs->port_status [i]); if (test_bit(i, &resume_needed)) { temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME); ehci_writel(ehci, temp, &ehci->regs->port_status [i]); } } ehci->next_statechange = jiffies + msecs_to_jiffies(5); spin_unlock_irq(&ehci->lock); ehci_handover_companion_ports(ehci); /* Now we can safely re-enable irqs */ spin_lock_irq(&ehci->lock); if (ehci->shutdown) goto shutdown; ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable); (void) ehci_readl(ehci, &ehci->regs->intr_enable); spin_unlock_irq(&ehci->lock); return 0; shutdown: spin_unlock_irq(&ehci->lock); return -ESHUTDOWN; } static unsigned long ehci_get_resuming_ports(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); return ehci->resuming_ports; } #else #define ehci_bus_suspend NULL #define ehci_bus_resume NULL #define ehci_get_resuming_ports NULL #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ /* * Sets the owner of a port */ static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner) { u32 __iomem *status_reg; u32 port_status; int try; status_reg = &ehci->regs->port_status[portnum]; /* * The controller won't set the OWNER bit if the port is * enabled, so this loop will sometimes require at least two * iterations: one to disable the port and one to set OWNER. 
*/ for (try = 4; try > 0; --try) { spin_lock_irq(&ehci->lock); port_status = ehci_readl(ehci, status_reg); if ((port_status & PORT_OWNER) == new_owner || (port_status & (PORT_OWNER | PORT_CONNECT)) == 0) try = 0; else { port_status ^= PORT_OWNER; port_status &= ~(PORT_PE | PORT_RWC_BITS); ehci_writel(ehci, port_status, status_reg); } spin_unlock_irq(&ehci->lock); if (try > 1) msleep(5); } } /*-------------------------------------------------------------------------*/ static int check_reset_complete ( struct ehci_hcd *ehci, int index, u32 __iomem *status_reg, int port_status ) { if (!(port_status & PORT_CONNECT)) return port_status; /* if reset finished and it's still not enabled -- handoff */ if (!(port_status & PORT_PE)) { /* with integrated TT, there's nobody to hand it to! */ if (ehci_is_TDI(ehci)) { ehci_dbg (ehci, "Failed to enable port %d on root hub TT\n", index+1); return port_status; } ehci_dbg (ehci, "port %d full speed --> companion\n", index + 1); // what happens if HCS_N_CC(params) == 0 ? port_status |= PORT_OWNER; port_status &= ~PORT_RWC_BITS; ehci_writel(ehci, port_status, status_reg); /* ensure 440EPX ohci controller state is operational */ if (ehci->has_amcc_usb23) set_ohci_hcfs(ehci, 1); } else { ehci_dbg(ehci, "port %d reset complete, port enabled\n", index + 1); /* ensure 440EPx ohci controller state is suspended */ if (ehci->has_amcc_usb23) set_ohci_hcfs(ehci, 0); } return port_status; } /*-------------------------------------------------------------------------*/ /* build "status change" packet (one or two bytes) from HC registers */ static int ehci_hub_status_data (struct usb_hcd *hcd, char *buf) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 temp, status; u32 mask; int ports, i, retval = 1; unsigned long flags; u32 ppcd = ~0; /* init status to no-changes */ buf [0] = 0; ports = HCS_N_PORTS (ehci->hcs_params); if (ports > 7) { buf [1] = 0; retval++; } /* Inform the core about resumes-in-progress by returning * a non-zero value even if there are no status changes. */ status = ehci->resuming_ports; /* Some boards (mostly VIA?) report bogus overcurrent indications, * causing massive log spam unless we completely ignore them. It * may be relevant that VIA VT8235 controllers, where PORT_POWER is * always set, seem to clear PORT_OCC and PORT_CSC when writing to * PORT_POWER; that's surprising, but maybe within-spec. */ if (!ignore_oc && !ehci->spurious_oc) mask = PORT_CSC | PORT_PEC | PORT_OCC; else mask = PORT_CSC | PORT_PEC; // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND /* no hub change reports (bit 0) for now (power, ...) */ /* port N changes (bit N)? */ spin_lock_irqsave (&ehci->lock, flags); /* get per-port change detect bits */ if (ehci->has_ppcd) ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16; for (i = 0; i < ports; i++) { /* leverage per-port change bits feature */ if (ppcd & (1 << i)) temp = ehci_readl(ehci, &ehci->regs->port_status[i]); else temp = 0; /* * Return status information even for ports with OWNER set. * Otherwise hub_wq wouldn't see the disconnect event when a * high-speed device is switched over to the companion * controller by the user. 
*/ if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend) || (ehci->reset_done[i] && time_after_eq( jiffies, ehci->reset_done[i])) || ehci_has_ci_pec_bug(ehci, temp)) { if (i < 7) buf [0] |= 1 << (i + 1); else buf [1] |= 1 << (i - 7); status = STS_PCD; } } /* If a resume is in progress, make sure it can finish */ if (ehci->resuming_ports) mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); spin_unlock_irqrestore (&ehci->lock, flags); return status ? retval : 0; } /*-------------------------------------------------------------------------*/ static void ehci_hub_descriptor ( struct ehci_hcd *ehci, struct usb_hub_descriptor *desc ) { int ports = HCS_N_PORTS (ehci->hcs_params); u16 temp; desc->bDescriptorType = USB_DT_HUB; desc->bPwrOn2PwrGood = 10; /* ehci 1.0, 2.3.9 says 20ms max */ desc->bHubContrCurrent = 0; desc->bNbrPorts = ports; temp = 1 + (ports / 8); desc->bDescLength = 7 + 2 * temp; /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ memset(&desc->u.hs.DeviceRemovable[0], 0, temp); memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp); temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */ if (HCS_PPC (ehci->hcs_params)) temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */ else temp |= HUB_CHAR_NO_LPSM; /* no power switching */ #if 0 // re-enable when we support USB_PORT_FEAT_INDICATOR below. if (HCS_INDICATOR (ehci->hcs_params)) temp |= HUB_CHAR_PORTIND; /* per-port indicators (LEDs) */ #endif desc->wHubCharacteristics = cpu_to_le16(temp); } /*-------------------------------------------------------------------------*/ int ehci_hub_control( struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength ) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); int ports = HCS_N_PORTS (ehci->hcs_params); u32 __iomem *status_reg, *hostpc_reg; u32 temp, temp1, status; unsigned long flags; int retval = 0; unsigned selector; /* * Avoid out-of-bounds values while calculating the port index * from wIndex. The compiler doesn't like pointers to invalid * addresses, even if they are never used. */ temp = (wIndex - 1) & 0xff; if (temp >= HCS_N_PORTS_MAX) temp = 0; status_reg = &ehci->regs->port_status[temp]; hostpc_reg = &ehci->regs->hostpc[temp]; /* * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. * HCS_INDICATOR may say we can change LEDs to off/amber/green. * (track current state ourselves) ... blink for diagnostics, * power, "this is the one", etc. EHCI spec supports this. */ spin_lock_irqsave (&ehci->lock, flags); switch (typeReq) { case ClearHubFeature: switch (wValue) { case C_HUB_LOCAL_POWER: case C_HUB_OVER_CURRENT: /* no hub-wide feature/status flags */ break; default: goto error; } break; case ClearPortFeature: if (!wIndex || wIndex > ports) goto error; wIndex--; temp = ehci_readl(ehci, status_reg); temp &= ~PORT_RWC_BITS; /* * Even if OWNER is set, so the port is owned by the * companion controller, hub_wq needs to be able to clear * the port-change status bits (especially * USB_PORT_STAT_C_CONNECTION). 
*/ switch (wValue) { case USB_PORT_FEAT_ENABLE: ehci_writel(ehci, temp & ~PORT_PE, status_reg); break; case USB_PORT_FEAT_C_ENABLE: ehci_writel(ehci, temp | PORT_PEC, status_reg); break; case USB_PORT_FEAT_SUSPEND: if (temp & PORT_RESET) goto error; if (ehci->no_selective_suspend) break; #ifdef CONFIG_USB_OTG if ((hcd->self.otg_port == (wIndex + 1)) && hcd->self.b_hnp_enable) { otg_start_hnp(hcd->usb_phy->otg); break; } #endif if (!(temp & PORT_SUSPEND)) break; if ((temp & PORT_PE) == 0) goto error; /* clear phy low-power mode before resume */ if (ehci->has_tdi_phy_lpm) { temp1 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp1 & ~HOSTPC_PHCD, hostpc_reg); spin_unlock_irqrestore(&ehci->lock, flags); msleep(5);/* wait to leave low-power mode */ spin_lock_irqsave(&ehci->lock, flags); } /* resume signaling for 20 msec */ temp &= ~PORT_WAKE_BITS; ehci_writel(ehci, temp | PORT_RESUME, status_reg); ehci->reset_done[wIndex] = jiffies + msecs_to_jiffies(USB_RESUME_TIMEOUT); set_bit(wIndex, &ehci->resuming_ports); usb_hcd_start_port_resume(&hcd->self, wIndex); break; case USB_PORT_FEAT_C_SUSPEND: clear_bit(wIndex, &ehci->port_c_suspend); break; case USB_PORT_FEAT_POWER: if (HCS_PPC(ehci->hcs_params)) { spin_unlock_irqrestore(&ehci->lock, flags); ehci_port_power(ehci, wIndex, false); spin_lock_irqsave(&ehci->lock, flags); } break; case USB_PORT_FEAT_C_CONNECTION: ehci_writel(ehci, temp | PORT_CSC, status_reg); break; case USB_PORT_FEAT_C_OVER_CURRENT: ehci_writel(ehci, temp | PORT_OCC, status_reg); break; case USB_PORT_FEAT_C_RESET: /* GetPortStatus clears reset */ break; default: goto error; } ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */ break; case GetHubDescriptor: ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *) buf); break; case GetHubStatus: /* no hub-wide feature/status flags */ memset (buf, 0, 4); //cpu_to_le32s ((u32 *) buf); break; case GetPortStatus: if (!wIndex || wIndex > ports) goto error; wIndex--; status = 0; temp = ehci_readl(ehci, status_reg); // wPortChange bits if (temp & PORT_CSC) status |= USB_PORT_STAT_C_CONNECTION << 16; if (temp & PORT_PEC) status |= USB_PORT_STAT_C_ENABLE << 16; if (ehci_has_ci_pec_bug(ehci, temp)) { status |= USB_PORT_STAT_C_ENABLE << 16; ehci_info(ehci, "PE is cleared by HW port:%d PORTSC:%08x\n", wIndex + 1, temp); } if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)){ status |= USB_PORT_STAT_C_OVERCURRENT << 16; /* * Hubs should disable port power on over-current. * However, not all EHCI implementations do this * automatically, even if they _do_ support per-port * power switching; they're allowed to just limit the * current. hub_wq will turn the power back on. */ if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle)) && HCS_PPC(ehci->hcs_params)) { spin_unlock_irqrestore(&ehci->lock, flags); ehci_port_power(ehci, wIndex, false); spin_lock_irqsave(&ehci->lock, flags); temp = ehci_readl(ehci, status_reg); } } /* no reset or resume pending */ if (!ehci->reset_done[wIndex]) { /* Remote Wakeup received? 
*/ if (temp & PORT_RESUME) { /* resume signaling for 20 msec */ ehci->reset_done[wIndex] = jiffies + msecs_to_jiffies(20); usb_hcd_start_port_resume(&hcd->self, wIndex); set_bit(wIndex, &ehci->resuming_ports); /* check the port again */ mod_timer(&ehci_to_hcd(ehci)->rh_timer, ehci->reset_done[wIndex]); } /* reset or resume not yet complete */ } else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) { ; /* wait until it is complete */ /* resume completed */ } else if (test_bit(wIndex, &ehci->resuming_ports)) { clear_bit(wIndex, &ehci->suspended_ports); set_bit(wIndex, &ehci->port_c_suspend); ehci->reset_done[wIndex] = 0; usb_hcd_end_port_resume(&hcd->self, wIndex); /* stop resume signaling */ temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME); ehci_writel(ehci, temp, status_reg); clear_bit(wIndex, &ehci->resuming_ports); retval = ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000 /* 2msec */); if (retval != 0) { ehci_err(ehci, "port %d resume error %d\n", wIndex + 1, retval); goto error; } temp = ehci_readl(ehci, status_reg); /* whoever resets must GetPortStatus to complete it!! */ } else { status |= USB_PORT_STAT_C_RESET << 16; ehci->reset_done [wIndex] = 0; /* force reset to complete */ ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET), status_reg); /* REVISIT: some hardware needs 550+ usec to clear * this bit; seems too long to spin routinely... */ retval = ehci_handshake(ehci, status_reg, PORT_RESET, 0, 1000); if (retval != 0) { ehci_err (ehci, "port %d reset error %d\n", wIndex + 1, retval); goto error; } /* see what we found out */ temp = check_reset_complete (ehci, wIndex, status_reg, ehci_readl(ehci, status_reg)); } /* transfer dedicated ports to the companion hc */ if ((temp & PORT_CONNECT) && test_bit(wIndex, &ehci->companion_ports)) { temp &= ~PORT_RWC_BITS; temp |= PORT_OWNER; ehci_writel(ehci, temp, status_reg); ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1); temp = ehci_readl(ehci, status_reg); } /* * Even if OWNER is set, there's no harm letting hub_wq * see the wPortStatus values (they should all be 0 except * for PORT_POWER anyway). 
*/ if (temp & PORT_CONNECT) { status |= USB_PORT_STAT_CONNECTION; // status may be from integrated TT if (ehci->has_hostpc) { temp1 = ehci_readl(ehci, hostpc_reg); status |= ehci_port_speed(ehci, temp1); } else status |= ehci_port_speed(ehci, temp); } if (temp & PORT_PE) status |= USB_PORT_STAT_ENABLE; /* maybe the port was unsuspended without our knowledge */ if (temp & (PORT_SUSPEND|PORT_RESUME)) { status |= USB_PORT_STAT_SUSPEND; } else if (test_bit(wIndex, &ehci->suspended_ports)) { clear_bit(wIndex, &ehci->suspended_ports); clear_bit(wIndex, &ehci->resuming_ports); ehci->reset_done[wIndex] = 0; if (temp & PORT_PE) set_bit(wIndex, &ehci->port_c_suspend); usb_hcd_end_port_resume(&hcd->self, wIndex); } if (temp & PORT_OC) status |= USB_PORT_STAT_OVERCURRENT; if (temp & PORT_RESET) status |= USB_PORT_STAT_RESET; if (temp & PORT_POWER) status |= USB_PORT_STAT_POWER; if (test_bit(wIndex, &ehci->port_c_suspend)) status |= USB_PORT_STAT_C_SUSPEND << 16; if (status & ~0xffff) /* only if wPortChange is interesting */ dbg_port(ehci, "GetStatus", wIndex + 1, temp); put_unaligned_le32(status, buf); break; case SetHubFeature: switch (wValue) { case C_HUB_LOCAL_POWER: case C_HUB_OVER_CURRENT: /* no hub-wide feature/status flags */ break; default: goto error; } break; case SetPortFeature: selector = wIndex >> 8; wIndex &= 0xff; if (unlikely(ehci->debug)) { /* If the debug port is active any port * feature requests should get denied */ if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) && (readl(&ehci->debug->control) & DBGP_ENABLED)) { retval = -ENODEV; goto error_exit; } } if (!wIndex || wIndex > ports) goto error; wIndex--; temp = ehci_readl(ehci, status_reg); if (temp & PORT_OWNER) break; temp &= ~PORT_RWC_BITS; switch (wValue) { case USB_PORT_FEAT_SUSPEND: if (ehci->no_selective_suspend) break; if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) goto error; /* After above check the port must be connected. * Set appropriate bit thus could put phy into low power * mode if we have tdi_phy_lpm feature */ temp &= ~PORT_WKCONN_E; temp |= PORT_WKDISC_E | PORT_WKOC_E; ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); if (ehci->has_tdi_phy_lpm) { spin_unlock_irqrestore(&ehci->lock, flags); msleep(5);/* 5ms for HCD enter low pwr mode */ spin_lock_irqsave(&ehci->lock, flags); temp1 = ehci_readl(ehci, hostpc_reg); ehci_writel(ehci, temp1 | HOSTPC_PHCD, hostpc_reg); temp1 = ehci_readl(ehci, hostpc_reg); ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", wIndex, (temp1 & HOSTPC_PHCD) ? "succeeded" : "failed"); } if (ehci_has_fsl_susp_errata(ehci)) { /* 10ms for HCD enter suspend */ spin_unlock_irqrestore(&ehci->lock, flags); usleep_range(10000, 20000); spin_lock_irqsave(&ehci->lock, flags); } set_bit(wIndex, &ehci->suspended_ports); break; case USB_PORT_FEAT_POWER: if (HCS_PPC(ehci->hcs_params)) { spin_unlock_irqrestore(&ehci->lock, flags); ehci_port_power(ehci, wIndex, true); spin_lock_irqsave(&ehci->lock, flags); } break; case USB_PORT_FEAT_RESET: if (temp & (PORT_SUSPEND|PORT_RESUME)) goto error; /* line status bits may report this as low speed, * which can be fine if this root hub has a * transaction translator built in. 
*/ if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT && !ehci_is_TDI(ehci) && PORT_USB11 (temp)) { ehci_dbg (ehci, "port %d low speed --> companion\n", wIndex + 1); temp |= PORT_OWNER; } else { temp |= PORT_RESET; temp &= ~PORT_PE; /* * caller must wait, then call GetPortStatus * usb 2.0 spec says 50 ms resets on root */ ehci->reset_done [wIndex] = jiffies + msecs_to_jiffies (50); /* * Force full-speed connect for FSL high-speed * erratum; disable HS Chirp by setting PFSC bit */ if (ehci_has_fsl_hs_errata(ehci)) temp |= (1 << PORTSC_FSL_PFSC); } ehci_writel(ehci, temp, status_reg); break; /* For downstream facing ports (these): one hub port is put * into test mode according to USB2 11.24.2.13, then the hub * must be reset (which for root hub now means rmmod+modprobe, * or else system reboot). See EHCI 2.3.9 and 4.14 for info * about the EHCI-specific stuff. */ case USB_PORT_FEAT_TEST: #ifdef CONFIG_USB_HCD_TEST_MODE if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) { spin_unlock_irqrestore(&ehci->lock, flags); retval = ehset_single_step_set_feature(hcd, wIndex + 1); spin_lock_irqsave(&ehci->lock, flags); break; } #endif if (!selector || selector > 5) goto error; spin_unlock_irqrestore(&ehci->lock, flags); ehci_quiesce(ehci); spin_lock_irqsave(&ehci->lock, flags); /* Put all enabled ports into suspend */ while (ports--) { u32 __iomem *sreg = &ehci->regs->port_status[ports]; temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS; if (temp & PORT_PE) ehci_writel(ehci, temp | PORT_SUSPEND, sreg); } spin_unlock_irqrestore(&ehci->lock, flags); ehci_halt(ehci); spin_lock_irqsave(&ehci->lock, flags); temp = ehci_readl(ehci, status_reg); temp |= selector << 16; ehci_writel(ehci, temp, status_reg); break; default: goto error; } ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ break; default: error: /* "stall" on error */ retval = -EPIPE; } error_exit: spin_unlock_irqrestore (&ehci->lock, flags); return retval; } EXPORT_SYMBOL_GPL(ehci_hub_control); static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); if (ehci_is_TDI(ehci)) return; set_owner(ehci, --portnum, PORT_OWNER); } static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); u32 __iomem *reg; if (ehci_is_TDI(ehci)) return 0; reg = &ehci->regs->port_status[portnum - 1]; return ehci_readl(ehci, reg) & PORT_OWNER; } static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable) { struct usb_hcd *hcd = ehci_to_hcd(ehci); u32 __iomem *status_reg = &ehci->regs->port_status[portnum]; u32 temp = ehci_readl(ehci, status_reg) & ~PORT_RWC_BITS; if (enable) ehci_writel(ehci, temp | PORT_POWER, status_reg); else ehci_writel(ehci, temp & ~PORT_POWER, status_reg); if (hcd->driver->port_power) hcd->driver->port_power(hcd, portnum, enable); return 0; }
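/*
 * Illustrative sketch (not part of the covered source). The hub code above
 * always masks PORT_RWC_BITS off a freshly read PORTSC value before writing
 * it back, because the change bits (connect, enable, over-current) are
 * "write 1 to clear": writing the value straight back would silently
 * acknowledge changes the driver has not handled yet. The DEMO_* masks
 * below are invented for this example and do not match the real EHCI
 * register layout.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_PORT_CSC	  (1u << 1)	/* connect status change (RWC) */
#define DEMO_PORT_PEC	  (1u << 3)	/* port enable change (RWC) */
#define DEMO_PORT_OCC	  (1u << 5)	/* over-current change (RWC) */
#define DEMO_RWC_BITS	  (DEMO_PORT_CSC | DEMO_PORT_PEC | DEMO_PORT_OCC)
#define DEMO_PORT_SUSPEND (1u << 7)

/* Compute the value to write back when suspending, without acking changes. */
static uint32_t demo_suspend_port(uint32_t portsc)
{
	return (portsc & ~DEMO_RWC_BITS) | DEMO_PORT_SUSPEND;
}

/* Acknowledge only the connect-status change, leaving the others pending. */
static uint32_t demo_clear_csc(uint32_t portsc)
{
	return (portsc & ~DEMO_RWC_BITS) | DEMO_PORT_CSC;
}

int main(void)
{
	uint32_t portsc = DEMO_PORT_CSC | DEMO_PORT_OCC; /* two pending changes */

	/* the suspend write-back carries no RWC bits, so both stay pending */
	assert((demo_suspend_port(portsc) & DEMO_RWC_BITS) == 0);
	/* clearing CSC is done explicitly and does not touch OCC */
	assert((demo_clear_csc(portsc) & DEMO_RWC_BITS) == DEMO_PORT_CSC);
	return 0;
}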
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/fsnotify_backend.h> #include <linux/inotify.h> #include <linux/slab.h> /* struct kmem_cache */ struct inotify_event_info { struct fsnotify_event fse; u32 mask; int wd; u32 sync_cookie; int name_len; char name[]; }; struct inotify_inode_mark { struct fsnotify_mark fsn_mark; int wd; }; static inline struct inotify_event_info *INOTIFY_E(struct fsnotify_event *fse) { return container_of(fse, struct inotify_event_info, fse); } /* * INOTIFY_USER_MASK represents all of the mask bits that we expose to * userspace. There is at least one bit (FS_EVENT_ON_CHILD) which is * used only internally to the kernel. */ #define INOTIFY_USER_MASK (IN_ALL_EVENTS) static inline __u32 inotify_mark_user_mask(struct fsnotify_mark *fsn_mark) { __u32 mask = fsn_mark->mask & INOTIFY_USER_MASK; if (fsn_mark->flags & FSNOTIFY_MARK_FLAG_EXCL_UNLINK) mask |= IN_EXCL_UNLINK; if (fsn_mark->flags & FSNOTIFY_MARK_FLAG_IN_ONESHOT) mask |= IN_ONESHOT; return mask; } extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group); extern int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask, struct inode *inode, struct inode *dir, const struct qstr *name, u32 cookie); extern const struct fsnotify_ops inotify_fsnotify_ops; extern struct kmem_cache *inotify_inode_mark_cachep; #ifdef CONFIG_INOTIFY_USER static inline void dec_inotify_instances(struct ucounts *ucounts) { dec_ucount(ucounts, UCOUNT_INOTIFY_INSTANCES); } static inline struct ucounts *inc_inotify_watches(struct ucounts *ucounts) { return inc_ucount(ucounts->ns, ucounts->uid, UCOUNT_INOTIFY_WATCHES); } static inline void dec_inotify_watches(struct ucounts *ucounts) { dec_ucount(ucounts, UCOUNT_INOTIFY_WATCHES); } #endif
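/*
 * Illustrative sketch (not part of the covered source). INOTIFY_E() above
 * recovers the containing inotify_event_info from a pointer to its embedded
 * fsnotify_event via container_of(). Below is a simplified userspace
 * rendition of that pattern; demo_container_of() and the demo_* structs are
 * invented for this example and are not the kernel definitions.
 */
#include <assert.h>
#include <stddef.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_event {			/* stand-in for fsnotify_event */
	unsigned int seq;
};

struct demo_event_info {		/* stand-in for inotify_event_info */
	struct demo_event fse;		/* embedded base event */
	unsigned int mask;
	int wd;
};

static struct demo_event_info *DEMO_E(struct demo_event *fse)
{
	return demo_container_of(fse, struct demo_event_info, fse);
}

int main(void)
{
	struct demo_event_info info = { .mask = 0x100, .wd = 7 };
	struct demo_event *base = &info.fse;	/* only the base is passed around */

	assert(DEMO_E(base) == &info);		/* containing struct recovered */
	assert(DEMO_E(base)->wd == 7);
	return 0;
}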
/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef PAGE_ACTOR_H #define PAGE_ACTOR_H /* * Copyright (c) 2013 * Phillip Lougher <phillip@squashfs.org.uk> */ struct squashfs_page_actor { union { void **buffer; struct page **page; }; void *pageaddr; void *tmp_buffer; void *(*squashfs_first_page)(struct squashfs_page_actor *); void *(*squashfs_next_page)(struct squashfs_page_actor *); void (*squashfs_finish_page)(struct squashfs_page_actor *); struct page *last_page; int pages; int length; int next_page; int alloc_buffer; int returned_pages; pgoff_t next_index; }; extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer, int pages, int length); extern struct squashfs_page_actor *squashfs_page_actor_init_special( struct squashfs_sb_info *msblk, struct page **page, int pages, int length, loff_t start_index); static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor) { struct page *last_page = actor->next_page == actor->pages ? actor->last_page : ERR_PTR(-EIO); kfree(actor->tmp_buffer); kfree(actor); return last_page; } static inline void *squashfs_first_page(struct squashfs_page_actor *actor) { return actor->squashfs_first_page(actor); } static inline void *squashfs_next_page(struct squashfs_page_actor *actor) { return actor->squashfs_next_page(actor); } static inline void squashfs_finish_page(struct squashfs_page_actor *actor) { actor->squashfs_finish_page(actor); } static inline void squashfs_actor_nobuff(struct squashfs_page_actor *actor) { actor->alloc_buffer = 0; } #endif
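/*
 * Illustrative sketch (not part of the covered source). A squashfs
 * decompressor only sees the first/next/finish contract of the page actor
 * above: it asks for one output window at a time and never knows whether
 * the data lands in a plain buffer list or in page-cache pages. The
 * simplified demo_actor below (buffers only, at least one buffer assumed,
 * no page variant) and all demo_* names are invented for this example.
 */
#include <stddef.h>
#include <string.h>

#define DEMO_BLOCK_SIZE 4096

struct demo_actor {
	void **buffer;		/* array of DEMO_BLOCK_SIZE buffers */
	int pages;
	int next_page;
};

static void *demo_first_page(struct demo_actor *a)
{
	a->next_page = 1;
	return a->buffer[0];
}

static void *demo_next_page(struct demo_actor *a)
{
	return a->next_page == a->pages ? NULL : a->buffer[a->next_page++];
}

static void demo_finish_page(struct demo_actor *a)
{
	(void)a;	/* the real actor unmaps the last page here */
}

/* Fill every output window the actor hands out, one block at a time. */
static int demo_fill(struct demo_actor *a, const void *src, size_t len)
{
	size_t off = 0;
	void *dst;

	for (dst = demo_first_page(a); dst && off < len; dst = demo_next_page(a)) {
		size_t chunk = len - off < DEMO_BLOCK_SIZE ? len - off : DEMO_BLOCK_SIZE;

		memcpy(dst, (const char *)src + off, chunk);
		off += chunk;
	}
	demo_finish_page(a);
	return off == len ? 0 : -1;	/* -1: ran out of output windows */
}

int main(void)
{
	static char out0[DEMO_BLOCK_SIZE], out1[DEMO_BLOCK_SIZE];
	static char src[6000];
	void *bufs[] = { out0, out1 };
	struct demo_actor a = { .buffer = bufs, .pages = 2, .next_page = 0 };

	return demo_fill(&a, src, sizeof(src));
}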
// SPDX-License-Identifier: GPL-2.0-only /* iptables module for using new netfilter netlink queue * * (C) 2005 by Harald Welte <laforge@netfilter.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/netfilter_arp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_NFQUEUE.h> #include <net/netfilter/nf_queue.h> MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("Xtables: packet forwarding to netlink"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_NFQUEUE"); MODULE_ALIAS("ip6t_NFQUEUE"); MODULE_ALIAS("arpt_NFQUEUE"); static u32 jhash_initval __read_mostly; static unsigned int nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info *tinfo = par->targinfo; return NF_QUEUE_NR(tinfo->queuenum); } static unsigned int nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v1 *info = par->targinfo; u32 queue = info->queuenum; if (info->queues_total > 1) { queue = nfqueue_hash(skb, queue, info->queues_total, xt_family(par), jhash_initval); } return NF_QUEUE_NR(queue); } static unsigned int nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v2 *info = par->targinfo; unsigned int ret = nfqueue_tg_v1(skb, par); if (info->bypass) ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; return ret; } static int nfqueue_tg_check(const struct xt_tgchk_param *par) { const struct xt_NFQ_info_v3 *info = par->targinfo; u32 maxid; init_hashrandom(&jhash_initval); if (info->queues_total == 0) { pr_info_ratelimited("number of total queues is 0\n"); return -EINVAL; } maxid = info->queues_total - 1 + info->queuenum; if (maxid > 0xffff) { pr_info_ratelimited("number of queues (%u) out of range (got %u)\n", info->queues_total, maxid); return -ERANGE; } if (par->target->revision == 2 && info->flags > 1) return -EINVAL; if (par->target->revision == 3 && info->flags & ~NFQ_FLAG_MASK) return -EINVAL; return 0; } static unsigned int nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v3 *info = par->targinfo; u32 queue = info->queuenum; int ret; if (info->queues_total > 1) { if (info->flags & NFQ_FLAG_CPU_FANOUT) { int cpu = smp_processor_id(); queue = info->queuenum + cpu % info->queues_total; } else { queue = nfqueue_hash(skb, queue, info->queues_total, xt_family(par), jhash_initval); } } ret = NF_QUEUE_NR(queue); if (info->flags & NFQ_FLAG_BYPASS) ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; return ret; } static struct xt_target nfqueue_tg_reg[] __read_mostly = { { .name = "NFQUEUE", .family = NFPROTO_UNSPEC, .target = nfqueue_tg, .targetsize = sizeof(struct xt_NFQ_info), .me = THIS_MODULE, }, { .name = "NFQUEUE", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = nfqueue_tg_check, .target = nfqueue_tg_v1, .targetsize = sizeof(struct xt_NFQ_info_v1), .me = THIS_MODULE, }, { .name = "NFQUEUE", .revision = 2,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= nfqueue_tg_check,
		.target		= nfqueue_tg_v2,
		.targetsize	= sizeof(struct xt_NFQ_info_v2),
		.me		= THIS_MODULE,
	},
	{
		.name		= "NFQUEUE",
		.revision	= 3,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= nfqueue_tg_check,
		.target		= nfqueue_tg_v3,
		.targetsize	= sizeof(struct xt_NFQ_info_v3),
		.me		= THIS_MODULE,
	},
};

static int __init nfqueue_tg_init(void)
{
	return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
}

static void __exit nfqueue_tg_exit(void)
{
	xt_unregister_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
}

module_init(nfqueue_tg_init);
module_exit(nfqueue_tg_exit);
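The interesting part of the v3 target above is how it spreads packets across a block of queues: either by CPU (NFQ_FLAG_CPU_FANOUT) or by flow hash. The standalone sketch below only mirrors that selection arithmetic outside the kernel; fake_hash() and fake_cpu() are hypothetical stand-ins for nfqueue_hash() and smp_processor_id(), and the modulo is an approximation of the kernel's hash scaling.

/* Userspace-only sketch of the queue selection in nfqueue_tg_v3(). */
#include <stdio.h>

static unsigned int fake_hash(void) { return 0x9e3779b9u; } /* stand-in for nfqueue_hash() */
static unsigned int fake_cpu(void)  { return 3; }           /* stand-in for smp_processor_id() */

static unsigned int pick_queue(unsigned int queuenum, unsigned int queues_total,
			       int cpu_fanout)
{
	if (queues_total <= 1)
		return queuenum;
	if (cpu_fanout)
		return queuenum + fake_cpu() % queues_total;
	return queuenum + fake_hash() % queues_total;
}

int main(void)
{
	printf("cpu fanout:  queue %u\n", pick_queue(100, 8, 1));
	printf("hash fanout: queue %u\n", pick_queue(100, 8, 0));
	return 0;
}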
8 7 6 5 4 3 2 1 2 3 8 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> struct nft_range_expr { struct nft_data data_from; struct nft_data data_to; u8 sreg; u8 len; enum nft_range_ops op:8; }; void nft_range_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_range_expr *priv = nft_expr_priv(expr); int d1, d2; d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len); d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len); switch (priv->op) { case NFT_RANGE_EQ: if (d1 < 0 || d2 > 0) regs->verdict.code = NFT_BREAK; break; case NFT_RANGE_NEQ: if (d1 >= 0 && d2 <= 0) regs->verdict.code = NFT_BREAK; break; } } static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = { [NFTA_RANGE_SREG] = { .type = NLA_U32 }, [NFTA_RANGE_OP] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_RANGE_FROM_DATA] = { .type = NLA_NESTED }, [NFTA_RANGE_TO_DATA] = { .type = NLA_NESTED }, }; static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_range_expr *priv = nft_expr_priv(expr); struct nft_data_desc desc_from = { .type = NFT_DATA_VALUE, .size = sizeof(priv->data_from), }; struct nft_data_desc desc_to = { .type = NFT_DATA_VALUE, .size = sizeof(priv->data_to), }; int err; u32 op; if (!tb[NFTA_RANGE_SREG] || !tb[NFTA_RANGE_OP] || !tb[NFTA_RANGE_FROM_DATA] || !tb[NFTA_RANGE_TO_DATA]) return -EINVAL; err = nft_data_init(NULL, &priv->data_from, &desc_from, tb[NFTA_RANGE_FROM_DATA]); if (err < 0) return err; err = nft_data_init(NULL, &priv->data_to, &desc_to, tb[NFTA_RANGE_TO_DATA]); if (err < 0) goto err1; if (desc_from.len != desc_to.len) { err = -EINVAL; goto err2; } err = nft_parse_register_load(ctx, tb[NFTA_RANGE_SREG], &priv->sreg, desc_from.len); if (err < 0) goto err2; err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op); if (err < 0) goto err2; switch (op) { case NFT_RANGE_EQ: case NFT_RANGE_NEQ: break; default: err = -EINVAL; goto err2; } priv->op = op; priv->len = desc_from.len; return 0; err2: nft_data_release(&priv->data_to, desc_to.type); err1: nft_data_release(&priv->data_from, desc_from.type); return err; } static int nft_range_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_range_expr *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_RANGE_SREG, priv->sreg)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_RANGE_OP, htonl(priv->op))) goto nla_put_failure; if (nft_data_dump(skb, NFTA_RANGE_FROM_DATA, &priv->data_from, NFT_DATA_VALUE, priv->len) < 0 || nft_data_dump(skb, NFTA_RANGE_TO_DATA, &priv->data_to, NFT_DATA_VALUE, priv->len) < 0) goto nla_put_failure; return 0; nla_put_failure: return -1; } static const struct 
nft_expr_ops nft_range_ops = {
	.type		= &nft_range_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_range_expr)),
	.eval		= nft_range_eval,
	.init		= nft_range_init,
	.dump		= nft_range_dump,
	.reduce		= NFT_REDUCE_READONLY,
};

struct nft_expr_type nft_range_type __read_mostly = {
	.name		= "range",
	.ops		= &nft_range_ops,
	.policy		= nft_range_policy,
	.maxattr	= NFTA_RANGE_MAX,
	.owner		= THIS_MODULE,
};
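nft_range_eval() decides a match with two memcmp() calls against the lower and upper bound: for NFT_RANGE_EQ the rule continues only when the register value is >= data_from and <= data_to. A minimal userspace illustration of that comparison, using made-up byte arrays:

/* Standalone sketch of the range test performed by nft_range_eval(). */
#include <stdio.h>
#include <string.h>

static int in_range(const unsigned char *val, const unsigned char *from,
		    const unsigned char *to, size_t len)
{
	/* matches NFT_RANGE_EQ: break (no match) when val < from or val > to */
	return memcmp(val, from, len) >= 0 && memcmp(val, to, len) <= 0;
}

int main(void)
{
	unsigned char from[4] = { 192, 168, 0, 10 };
	unsigned char to[4]   = { 192, 168, 0, 20 };
	unsigned char val[4]  = { 192, 168, 0, 15 };

	printf("in range: %d\n", in_range(val, from, to, sizeof(val)));
	return 0;
}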
5 11 14 9 5 5 2 2 23 23 21 15 15 3 4 2 17 25 25 6 6 6 6 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 // SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth virtual HCI driver * * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com> * Copyright (C) 2004-2006 Marcel Holtmann <marcel@holtmann.org> */ #include <linux/module.h> #include <linux/unaligned.h> #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/miscdevice.h> #include <linux/debugfs.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "1.5" static bool amp; struct vhci_data { struct hci_dev *hdev; wait_queue_head_t read_wait; struct sk_buff_head 
readq; struct mutex open_mutex; struct delayed_work open_timeout; struct work_struct suspend_work; bool suspended; bool wakeup; __u16 msft_opcode; bool aosp_capable; atomic_t initialized; }; static int vhci_open_dev(struct hci_dev *hdev) { return 0; } static int vhci_close_dev(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_flush(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct vhci_data *data = hci_get_drvdata(hdev); memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&data->readq, skb); if (atomic_read(&data->initialized)) wake_up_interruptible(&data->read_wait); return 0; } static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id) { *data_path_id = 0; return 0; } static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type, struct bt_codec *codec, __u8 *vnd_len, __u8 **vnd_data) { if (type != ESCO_LINK) return -EINVAL; *vnd_len = 0; *vnd_data = NULL; return 0; } static bool vhci_wakeup(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); return data->wakeup; } static ssize_t force_suspend_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; char buf[3]; buf[0] = data->suspended ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static void vhci_suspend_work(struct work_struct *work) { struct vhci_data *data = container_of(work, struct vhci_data, suspend_work); if (data->suspended) hci_suspend_dev(data->hdev); else hci_resume_dev(data->hdev); } static ssize_t force_suspend_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (data->suspended == enable) return -EALREADY; data->suspended = enable; schedule_work(&data->suspend_work); return count; } static const struct file_operations force_suspend_fops = { .open = simple_open, .read = force_suspend_read, .write = force_suspend_write, .llseek = default_llseek, }; static ssize_t force_wakeup_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; char buf[3]; buf[0] = data->wakeup ? 
'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t force_wakeup_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (data->wakeup == enable) return -EALREADY; data->wakeup = enable; return count; } static const struct file_operations force_wakeup_fops = { .open = simple_open, .read = force_wakeup_read, .write = force_wakeup_write, .llseek = default_llseek, }; static int msft_opcode_set(void *data, u64 val) { struct vhci_data *vhci = data; if (val > 0xffff || hci_opcode_ogf(val) != 0x3f) return -EINVAL; if (vhci->msft_opcode) return -EALREADY; vhci->msft_opcode = val; return 0; } static int msft_opcode_get(void *data, u64 *val) { struct vhci_data *vhci = data; *val = vhci->msft_opcode; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set, "%llu\n"); static ssize_t aosp_capable_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *vhci = file->private_data; char buf[3]; buf[0] = vhci->aosp_capable ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t aosp_capable_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *vhci = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (!enable) return -EINVAL; if (vhci->aosp_capable) return -EALREADY; vhci->aosp_capable = enable; return count; } static const struct file_operations aosp_capable_fops = { .open = simple_open, .read = aosp_capable_read, .write = aosp_capable_write, .llseek = default_llseek, }; static int vhci_setup(struct hci_dev *hdev) { struct vhci_data *vhci = hci_get_drvdata(hdev); if (vhci->msft_opcode) hci_set_msft_opcode(hdev, vhci->msft_opcode); if (vhci->aosp_capable) hci_set_aosp_capable(hdev); return 0; } static void vhci_coredump(struct hci_dev *hdev) { /* No need to do anything */ } static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) { const char *buf; buf = "Controller Name: vhci_ctrl\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Firmware Version: vhci_fw\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Driver: vhci_drv\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Vendor: vhci\n"; skb_put_data(skb, buf, strlen(buf)); } #define MAX_COREDUMP_LINE_LEN 40 struct devcoredump_test_data { enum devcoredump_state state; unsigned int timeout; char data[MAX_COREDUMP_LINE_LEN]; }; static inline void force_devcd_timeout(struct hci_dev *hdev, unsigned int timeout) { #ifdef CONFIG_DEV_COREDUMP hdev->dump.timeout = secs_to_jiffies(timeout); #endif } static ssize_t force_devcd_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; struct hci_dev *hdev = data->hdev; struct sk_buff *skb = NULL; struct devcoredump_test_data dump_data; size_t data_size; int ret; if (count < offsetof(struct devcoredump_test_data, data) || count > sizeof(dump_data)) return -EINVAL; if (copy_from_user(&dump_data, user_buf, count)) return -EFAULT; data_size = count - offsetof(struct devcoredump_test_data, data); skb = alloc_skb(data_size, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_put_data(skb, &dump_data.data, data_size); hci_devcd_register(hdev, vhci_coredump, 
vhci_coredump_hdr, NULL); /* Force the devcoredump timeout */ if (dump_data.timeout) force_devcd_timeout(hdev, dump_data.timeout); ret = hci_devcd_init(hdev, skb->len); if (ret) { BT_ERR("Failed to generate devcoredump"); kfree_skb(skb); return ret; } hci_devcd_append(hdev, skb); switch (dump_data.state) { case HCI_DEVCOREDUMP_DONE: hci_devcd_complete(hdev); break; case HCI_DEVCOREDUMP_ABORT: hci_devcd_abort(hdev); break; case HCI_DEVCOREDUMP_TIMEOUT: /* Do nothing */ break; default: return -EINVAL; } return count; } static const struct file_operations force_devcoredump_fops = { .open = simple_open, .write = force_devcd_write, }; static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; struct sk_buff *skb; if (data->hdev) return -EBADFD; /* bits 2-5 are reserved (must be zero) */ if (opcode & 0x3c) return -EINVAL; skb = bt_skb_alloc(4, GFP_KERNEL); if (!skb) return -ENOMEM; hdev = hci_alloc_dev(); if (!hdev) { kfree_skb(skb); return -ENOMEM; } data->hdev = hdev; hdev->bus = HCI_VIRTUAL; hci_set_drvdata(hdev, data); hdev->open = vhci_open_dev; hdev->close = vhci_close_dev; hdev->flush = vhci_flush; hdev->send = vhci_send_frame; hdev->get_data_path_id = vhci_get_data_path_id; hdev->get_codec_config_data = vhci_get_codec_config_data; hdev->wakeup = vhci_wakeup; hdev->setup = vhci_setup; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); set_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks); /* bit 6 is for external configuration */ if (opcode & 0x40) set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); /* bit 7 is for raw device */ if (opcode & 0x80) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hci_free_dev(hdev); data->hdev = NULL; kfree_skb(skb); return -EBUSY; } debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, &force_suspend_fops); debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, &force_wakeup_fops); if (IS_ENABLED(CONFIG_BT_MSFTEXT)) debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data, &msft_opcode_fops); if (IS_ENABLED(CONFIG_BT_AOSPEXT)) debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data, &aosp_capable_fops); debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data, &force_devcoredump_fops); hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; skb_put_u8(skb, 0xff); skb_put_u8(skb, opcode); put_unaligned_le16(hdev->id, skb_put(skb, 2)); skb_queue_head(&data->readq, skb); atomic_inc(&data->initialized); wake_up_interruptible(&data->read_wait); return 0; } static int vhci_create_device(struct vhci_data *data, __u8 opcode) { int err; mutex_lock(&data->open_mutex); err = __vhci_create_device(data, opcode); mutex_unlock(&data->open_mutex); return err; } static inline ssize_t vhci_get_user(struct vhci_data *data, struct iov_iter *from) { size_t len = iov_iter_count(from); struct sk_buff *skb; __u8 pkt_type, opcode; int ret; if (len < 2 || len > HCI_MAX_FRAME_SIZE) return -EINVAL; skb = bt_skb_alloc(len, GFP_KERNEL); if (!skb) return -ENOMEM; if (!copy_from_iter_full(skb_put(skb, len), len, from)) { kfree_skb(skb); return -EFAULT; } pkt_type = *((__u8 *) skb->data); skb_pull(skb, 1); switch (pkt_type) { case HCI_EVENT_PKT: case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: if (!data->hdev) { kfree_skb(skb); return -ENODEV; } hci_skb_pkt_type(skb) = pkt_type; ret = hci_recv_frame(data->hdev, skb); break; case HCI_VENDOR_PKT: cancel_delayed_work_sync(&data->open_timeout); opcode = *((__u8 *) skb->data); skb_pull(skb, 1); if 
(skb->len > 0) { kfree_skb(skb); return -EINVAL; } kfree_skb(skb); ret = vhci_create_device(data, opcode); break; default: kfree_skb(skb); return -EINVAL; } return (ret < 0) ? ret : len; } static inline ssize_t vhci_put_user(struct vhci_data *data, struct sk_buff *skb, char __user *buf, int count) { char __user *ptr = buf; int len; len = min_t(unsigned int, skb->len, count); if (copy_to_user(ptr, skb->data, len)) return -EFAULT; if (!data->hdev) return len; data->hdev->stat.byte_tx += len; switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: data->hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: data->hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: data->hdev->stat.sco_tx++; break; } return len; } static ssize_t vhci_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct vhci_data *data = file->private_data; struct sk_buff *skb; ssize_t ret = 0; while (count) { skb = skb_dequeue(&data->readq); if (skb) { ret = vhci_put_user(data, skb, buf, count); if (ret < 0) skb_queue_head(&data->readq, skb); else kfree_skb(skb); break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } ret = wait_event_interruptible(data->read_wait, !skb_queue_empty(&data->readq)); if (ret < 0) break; } return ret; } static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct vhci_data *data = file->private_data; return vhci_get_user(data, from); } static __poll_t vhci_poll(struct file *file, poll_table *wait) { struct vhci_data *data = file->private_data; poll_wait(file, &data->read_wait, wait); if (!skb_queue_empty(&data->readq)) return EPOLLIN | EPOLLRDNORM; return EPOLLOUT | EPOLLWRNORM; } static void vhci_open_timeout(struct work_struct *work) { struct vhci_data *data = container_of(work, struct vhci_data, open_timeout.work); vhci_create_device(data, 0x00); } static int vhci_open(struct inode *inode, struct file *file) { struct vhci_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; skb_queue_head_init(&data->readq); init_waitqueue_head(&data->read_wait); mutex_init(&data->open_mutex); INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout); INIT_WORK(&data->suspend_work, vhci_suspend_work); file->private_data = data; nonseekable_open(inode, file); schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1)); return 0; } static int vhci_release(struct inode *inode, struct file *file) { struct vhci_data *data = file->private_data; struct hci_dev *hdev; cancel_delayed_work_sync(&data->open_timeout); flush_work(&data->suspend_work); hdev = data->hdev; if (hdev) { hci_unregister_dev(hdev); hci_free_dev(hdev); } skb_queue_purge(&data->readq); file->private_data = NULL; kfree(data); return 0; } static const struct file_operations vhci_fops = { .owner = THIS_MODULE, .read = vhci_read, .write_iter = vhci_write, .poll = vhci_poll, .open = vhci_open, .release = vhci_release, }; static struct miscdevice vhci_miscdev = { .name = "vhci", .fops = &vhci_fops, .minor = VHCI_MINOR, }; module_misc_device(vhci_miscdev); module_param(amp, bool, 0644); MODULE_PARM_DESC(amp, "Create AMP controller device"); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("devname:vhci"); MODULE_ALIAS_MISCDEV(VHCI_MINOR);
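Putting the vhci read/write paths together: a client creates a controller by writing a two-byte vendor packet (0xff followed by the opcode byte) and then reads back a vendor packet carrying the new HCI index. The sketch below is a hedged userspace example of that handshake; error handling is minimal and the opcode 0x00 (plain primary controller) is an assumption.

/* Minimal /dev/vhci client sketch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char create[2] = { 0xff, 0x00 };	/* HCI_VENDOR_PKT, opcode */
	unsigned char reply[16];
	ssize_t n;
	int fd;

	fd = open("/dev/vhci", O_RDWR);
	if (fd < 0) {
		perror("open /dev/vhci");
		return 1;
	}
	if (write(fd, create, sizeof(create)) != sizeof(create)) {
		perror("write");
		close(fd);
		return 1;
	}
	/* reply data is 0xff, opcode, hci index as little-endian u16 */
	n = read(fd, reply, sizeof(reply));
	if (n >= 4)
		printf("created hci%u\n", reply[2] | (reply[3] << 8));
	close(fd);
	return 0;
}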
35 35 35 3 3 3 3 3 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 // SPDX-License-Identifier: GPL-2.0-only /* * RDMA resource limiting controller for cgroups. * * Used to allow a cgroup hierarchy to stop processes from consuming * additional RDMA resources after a certain limit is reached. * * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com> */ #include <linux/bitops.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/cgroup.h> #include <linux/parser.h> #include <linux/cgroup_rdma.h> #define RDMACG_MAX_STR "max" /* * Protects list of resource pools maintained on per cgroup basis * and rdma device list. */ static DEFINE_MUTEX(rdmacg_mutex); static LIST_HEAD(rdmacg_devices); enum rdmacg_file_type { RDMACG_RESOURCE_TYPE_MAX, RDMACG_RESOURCE_TYPE_STAT, }; /* * resource table definition as to be seen by the user. * Need to add entries to it when more resources are * added/defined at IB verb/core layer. 
*/ static char const *rdmacg_resource_names[] = { [RDMACG_RESOURCE_HCA_HANDLE] = "hca_handle", [RDMACG_RESOURCE_HCA_OBJECT] = "hca_object", }; /* resource tracker for each resource of rdma cgroup */ struct rdmacg_resource { int max; int usage; }; /* * resource pool object which represents per cgroup, per device * resources. There are multiple instances of this object per cgroup, * therefore it cannot be embedded within rdma_cgroup structure. It * is maintained as list. */ struct rdmacg_resource_pool { struct rdmacg_device *device; struct rdmacg_resource resources[RDMACG_RESOURCE_MAX]; struct list_head cg_node; struct list_head dev_node; /* count active user tasks of this pool */ u64 usage_sum; /* total number counts which are set to max */ int num_max_cnt; }; static struct rdma_cgroup *css_rdmacg(struct cgroup_subsys_state *css) { return container_of(css, struct rdma_cgroup, css); } static struct rdma_cgroup *parent_rdmacg(struct rdma_cgroup *cg) { return css_rdmacg(cg->css.parent); } static inline struct rdma_cgroup *get_current_rdmacg(void) { return css_rdmacg(task_get_css(current, rdma_cgrp_id)); } static void set_resource_limit(struct rdmacg_resource_pool *rpool, int index, int new_max) { if (new_max == S32_MAX) { if (rpool->resources[index].max != S32_MAX) rpool->num_max_cnt++; } else { if (rpool->resources[index].max == S32_MAX) rpool->num_max_cnt--; } rpool->resources[index].max = new_max; } static void set_all_resource_max_limit(struct rdmacg_resource_pool *rpool) { int i; for (i = 0; i < RDMACG_RESOURCE_MAX; i++) set_resource_limit(rpool, i, S32_MAX); } static void free_cg_rpool_locked(struct rdmacg_resource_pool *rpool) { lockdep_assert_held(&rdmacg_mutex); list_del(&rpool->cg_node); list_del(&rpool->dev_node); kfree(rpool); } static struct rdmacg_resource_pool * find_cg_rpool_locked(struct rdma_cgroup *cg, struct rdmacg_device *device) { struct rdmacg_resource_pool *pool; lockdep_assert_held(&rdmacg_mutex); list_for_each_entry(pool, &cg->rpools, cg_node) if (pool->device == device) return pool; return NULL; } static struct rdmacg_resource_pool * get_cg_rpool_locked(struct rdma_cgroup *cg, struct rdmacg_device *device) { struct rdmacg_resource_pool *rpool; rpool = find_cg_rpool_locked(cg, device); if (rpool) return rpool; rpool = kzalloc(sizeof(*rpool), GFP_KERNEL); if (!rpool) return ERR_PTR(-ENOMEM); rpool->device = device; set_all_resource_max_limit(rpool); INIT_LIST_HEAD(&rpool->cg_node); INIT_LIST_HEAD(&rpool->dev_node); list_add_tail(&rpool->cg_node, &cg->rpools); list_add_tail(&rpool->dev_node, &device->rpools); return rpool; } /** * uncharge_cg_locked - uncharge resource for rdma cgroup * @cg: pointer to cg to uncharge and all parents in hierarchy * @device: pointer to rdmacg device * @index: index of the resource to uncharge in cg (resource pool) * * It also frees the resource pool which was created as part of * charging operation when there are no resources attached to * resource pool. */ static void uncharge_cg_locked(struct rdma_cgroup *cg, struct rdmacg_device *device, enum rdmacg_resource_type index) { struct rdmacg_resource_pool *rpool; rpool = find_cg_rpool_locked(cg, device); /* * rpool cannot be null at this stage. Let kernel operate in case * if there a bug in IB stack or rdma controller, instead of crashing * the system. */ if (unlikely(!rpool)) { pr_warn("Invalid device %p or rdma cgroup %p\n", cg, device); return; } rpool->resources[index].usage--; /* * A negative count (or overflow) is invalid, * it indicates a bug in the rdma controller. 
*/ WARN_ON_ONCE(rpool->resources[index].usage < 0); rpool->usage_sum--; if (rpool->usage_sum == 0 && rpool->num_max_cnt == RDMACG_RESOURCE_MAX) { /* * No user of the rpool and all entries are set to max, so * safe to delete this rpool. */ free_cg_rpool_locked(rpool); } } /** * rdmacg_uncharge_hierarchy - hierarchically uncharge rdma resource count * @cg: pointer to cg to uncharge and all parents in hierarchy * @device: pointer to rdmacg device * @stop_cg: while traversing hirerchy, when meet with stop_cg cgroup * stop uncharging * @index: index of the resource to uncharge in cg in given resource pool */ static void rdmacg_uncharge_hierarchy(struct rdma_cgroup *cg, struct rdmacg_device *device, struct rdma_cgroup *stop_cg, enum rdmacg_resource_type index) { struct rdma_cgroup *p; mutex_lock(&rdmacg_mutex); for (p = cg; p != stop_cg; p = parent_rdmacg(p)) uncharge_cg_locked(p, device, index); mutex_unlock(&rdmacg_mutex); css_put(&cg->css); } /** * rdmacg_uncharge - hierarchically uncharge rdma resource count * @cg: pointer to cg to uncharge and all parents in hierarchy * @device: pointer to rdmacg device * @index: index of the resource to uncharge in cgroup in given resource pool */ void rdmacg_uncharge(struct rdma_cgroup *cg, struct rdmacg_device *device, enum rdmacg_resource_type index) { if (index >= RDMACG_RESOURCE_MAX) return; rdmacg_uncharge_hierarchy(cg, device, NULL, index); } EXPORT_SYMBOL(rdmacg_uncharge); /** * rdmacg_try_charge - hierarchically try to charge the rdma resource * @rdmacg: pointer to rdma cgroup which will own this resource * @device: pointer to rdmacg device * @index: index of the resource to charge in cgroup (resource pool) * * This function follows charging resource in hierarchical way. * It will fail if the charge would cause the new value to exceed the * hierarchical limit. * Returns 0 if the charge succeeded, otherwise -EAGAIN, -ENOMEM or -EINVAL. * Returns pointer to rdmacg for this resource when charging is successful. * * Charger needs to account resources on two criteria. * (a) per cgroup & (b) per device resource usage. * Per cgroup resource usage ensures that tasks of cgroup doesn't cross * the configured limits. Per device provides granular configuration * in multi device usage. It allocates resource pool in the hierarchy * for each parent it come across for first resource. Later on resource * pool will be available. Therefore it will be much faster thereon * to charge/uncharge. */ int rdmacg_try_charge(struct rdma_cgroup **rdmacg, struct rdmacg_device *device, enum rdmacg_resource_type index) { struct rdma_cgroup *cg, *p; struct rdmacg_resource_pool *rpool; s64 new; int ret = 0; if (index >= RDMACG_RESOURCE_MAX) return -EINVAL; /* * hold on to css, as cgroup can be removed but resource * accounting happens on css. */ cg = get_current_rdmacg(); mutex_lock(&rdmacg_mutex); for (p = cg; p; p = parent_rdmacg(p)) { rpool = get_cg_rpool_locked(p, device); if (IS_ERR(rpool)) { ret = PTR_ERR(rpool); goto err; } else { new = rpool->resources[index].usage + 1; if (new > rpool->resources[index].max) { ret = -EAGAIN; goto err; } else { rpool->resources[index].usage = new; rpool->usage_sum++; } } } mutex_unlock(&rdmacg_mutex); *rdmacg = cg; return 0; err: mutex_unlock(&rdmacg_mutex); rdmacg_uncharge_hierarchy(cg, device, p, index); return ret; } EXPORT_SYMBOL(rdmacg_try_charge); /** * rdmacg_register_device - register rdmacg device to rdma controller. * @device: pointer to rdmacg device whose resources need to be accounted. 
* * If IB stack wish a device to participate in rdma cgroup resource * tracking, it must invoke this API to register with rdma cgroup before * any user space application can start using the RDMA resources. */ void rdmacg_register_device(struct rdmacg_device *device) { INIT_LIST_HEAD(&device->dev_node); INIT_LIST_HEAD(&device->rpools); mutex_lock(&rdmacg_mutex); list_add_tail(&device->dev_node, &rdmacg_devices); mutex_unlock(&rdmacg_mutex); } EXPORT_SYMBOL(rdmacg_register_device); /** * rdmacg_unregister_device - unregister rdmacg device from rdma controller. * @device: pointer to rdmacg device which was previously registered with rdma * controller using rdmacg_register_device(). * * IB stack must invoke this after all the resources of the IB device * are destroyed and after ensuring that no more resources will be created * when this API is invoked. */ void rdmacg_unregister_device(struct rdmacg_device *device) { struct rdmacg_resource_pool *rpool, *tmp; /* * Synchronize with any active resource settings, * usage query happening via configfs. */ mutex_lock(&rdmacg_mutex); list_del_init(&device->dev_node); /* * Now that this device is off the cgroup list, its safe to free * all the rpool resources. */ list_for_each_entry_safe(rpool, tmp, &device->rpools, dev_node) free_cg_rpool_locked(rpool); mutex_unlock(&rdmacg_mutex); } EXPORT_SYMBOL(rdmacg_unregister_device); static int parse_resource(char *c, int *intval) { substring_t argstr; char *name, *value = c; size_t len; int ret, i; name = strsep(&value, "="); if (!name || !value) return -EINVAL; i = match_string(rdmacg_resource_names, RDMACG_RESOURCE_MAX, name); if (i < 0) return i; len = strlen(value); argstr.from = value; argstr.to = value + len; ret = match_int(&argstr, intval); if (ret >= 0) { if (*intval < 0) return -EINVAL; return i; } if (strncmp(value, RDMACG_MAX_STR, len) == 0) { *intval = S32_MAX; return i; } return -EINVAL; } static int rdmacg_parse_limits(char *options, int *new_limits, unsigned long *enables) { char *c; int err = -EINVAL; /* parse resource options */ while ((c = strsep(&options, " ")) != NULL) { int index, intval; index = parse_resource(c, &intval); if (index < 0) goto err; new_limits[index] = intval; *enables |= BIT(index); } return 0; err: return err; } static struct rdmacg_device *rdmacg_get_device_locked(const char *name) { struct rdmacg_device *device; lockdep_assert_held(&rdmacg_mutex); list_for_each_entry(device, &rdmacg_devices, dev_node) if (!strcmp(name, device->name)) return device; return NULL; } static ssize_t rdmacg_resource_set_max(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct rdma_cgroup *cg = css_rdmacg(of_css(of)); const char *dev_name; struct rdmacg_resource_pool *rpool; struct rdmacg_device *device; char *options = strstrip(buf); int *new_limits; unsigned long enables = 0; int i = 0, ret = 0; /* extract the device name first */ dev_name = strsep(&options, " "); if (!dev_name) { ret = -EINVAL; goto err; } new_limits = kcalloc(RDMACG_RESOURCE_MAX, sizeof(int), GFP_KERNEL); if (!new_limits) { ret = -ENOMEM; goto err; } ret = rdmacg_parse_limits(options, new_limits, &enables); if (ret) goto parse_err; /* acquire lock to synchronize with hot plug devices */ mutex_lock(&rdmacg_mutex); device = rdmacg_get_device_locked(dev_name); if (!device) { ret = -ENODEV; goto dev_err; } rpool = get_cg_rpool_locked(cg, device); if (IS_ERR(rpool)) { ret = PTR_ERR(rpool); goto dev_err; } /* now set the new limits of the rpool */ for_each_set_bit(i, &enables, RDMACG_RESOURCE_MAX) 
set_resource_limit(rpool, i, new_limits[i]); if (rpool->usage_sum == 0 && rpool->num_max_cnt == RDMACG_RESOURCE_MAX) { /* * No user of the rpool and all entries are set to max, so * safe to delete this rpool. */ free_cg_rpool_locked(rpool); } dev_err: mutex_unlock(&rdmacg_mutex); parse_err: kfree(new_limits); err: return ret ?: nbytes; } static void print_rpool_values(struct seq_file *sf, struct rdmacg_resource_pool *rpool) { enum rdmacg_file_type sf_type; int i; u32 value; sf_type = seq_cft(sf)->private; for (i = 0; i < RDMACG_RESOURCE_MAX; i++) { seq_puts(sf, rdmacg_resource_names[i]); seq_putc(sf, '='); if (sf_type == RDMACG_RESOURCE_TYPE_MAX) { if (rpool) value = rpool->resources[i].max; else value = S32_MAX; } else { if (rpool) value = rpool->resources[i].usage; else value = 0; } if (value == S32_MAX) seq_puts(sf, RDMACG_MAX_STR); else seq_printf(sf, "%d", value); seq_putc(sf, ' '); } } static int rdmacg_resource_read(struct seq_file *sf, void *v) { struct rdmacg_device *device; struct rdmacg_resource_pool *rpool; struct rdma_cgroup *cg = css_rdmacg(seq_css(sf)); mutex_lock(&rdmacg_mutex); list_for_each_entry(device, &rdmacg_devices, dev_node) { seq_printf(sf, "%s ", device->name); rpool = find_cg_rpool_locked(cg, device); print_rpool_values(sf, rpool); seq_putc(sf, '\n'); } mutex_unlock(&rdmacg_mutex); return 0; } static struct cftype rdmacg_files[] = { { .name = "max", .write = rdmacg_resource_set_max, .seq_show = rdmacg_resource_read, .private = RDMACG_RESOURCE_TYPE_MAX, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "current", .seq_show = rdmacg_resource_read, .private = RDMACG_RESOURCE_TYPE_STAT, .flags = CFTYPE_NOT_ON_ROOT, }, { } /* terminate */ }; static struct cgroup_subsys_state * rdmacg_css_alloc(struct cgroup_subsys_state *parent) { struct rdma_cgroup *cg; cg = kzalloc(sizeof(*cg), GFP_KERNEL); if (!cg) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&cg->rpools); return &cg->css; } static void rdmacg_css_free(struct cgroup_subsys_state *css) { struct rdma_cgroup *cg = css_rdmacg(css); kfree(cg); } /** * rdmacg_css_offline - cgroup css_offline callback * @css: css of interest * * This function is called when @css is about to go away and responsible * for shooting down all rdmacg associated with @css. As part of that it * marks all the resource pool entries to max value, so that when resources are * uncharged, associated resource pool can be freed as well. */ static void rdmacg_css_offline(struct cgroup_subsys_state *css) { struct rdma_cgroup *cg = css_rdmacg(css); struct rdmacg_resource_pool *rpool; mutex_lock(&rdmacg_mutex); list_for_each_entry(rpool, &cg->rpools, cg_node) set_all_resource_max_limit(rpool); mutex_unlock(&rdmacg_mutex); } struct cgroup_subsys rdma_cgrp_subsys = { .css_alloc = rdmacg_css_alloc, .css_free = rdmacg_css_free, .css_offline = rdmacg_css_offline, .legacy_cftypes = rdmacg_files, .dfl_cftypes = rdmacg_files, };
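The core of rdmacg_try_charge() is the hierarchical walk: every ancestor cgroup is charged, and if any level would exceed its limit the levels already charged are rolled back (which is what rdmacg_uncharge_hierarchy() does with its stop_cg argument). The toy model below reproduces only that walk in userspace; struct toy_cg is an illustration, not the kernel's rdma_cgroup, and there is no locking or per-device pool.

/* Userspace toy model of the hierarchical charge/rollback logic. */
#include <stdio.h>

struct toy_cg {
	struct toy_cg *parent;
	int usage, max;
};

static int toy_try_charge(struct toy_cg *cg)
{
	struct toy_cg *p, *q;

	for (p = cg; p; p = p->parent) {
		if (p->usage + 1 > p->max)
			goto rollback;
		p->usage++;
	}
	return 0;

rollback:
	/* uncharge everything already charged, stopping at the failing level */
	for (q = cg; q != p; q = q->parent)
		q->usage--;
	return -1;
}

int main(void)
{
	struct toy_cg root  = { NULL,  0, 2 };	/* parent limit of 2 */
	struct toy_cg child = { &root, 0, 10 };

	printf("%d %d %d\n", toy_try_charge(&child),
	       toy_try_charge(&child), toy_try_charge(&child)); /* 0 0 -1 */
	return 0;
}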
1 35 5 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 /* * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #ifndef __DRM_PROPERTY_H__ #define __DRM_PROPERTY_H__ #include <linux/list.h> #include <linux/ctype.h> #include <drm/drm_mode_object.h> #include <uapi/drm/drm_mode.h> /** * struct drm_property_enum - symbolic values for enumerations * @head: list of enum values, linked to &drm_property.enum_list * @name: symbolic name for the enum * * For enumeration and bitmask properties this structure stores the symbolic * decoding for each value. This is used for example for the rotation property. */ struct drm_property_enum { /** * @value: numeric property value for this enum entry * * If the property has the type &DRM_MODE_PROP_BITMASK, @value stores a * bitshift, not a bitmask. In other words, the enum entry is enabled * if the bit number @value is set in the property's value. This enum * entry has the bitmask ``1 << value``. */ uint64_t value; struct list_head head; char name[DRM_PROP_NAME_LEN]; }; /** * struct drm_property - modeset object property * * This structure represent a modeset object property. It combines both the name * of the property with the set of permissible values. 
This means that when a * driver wants to use a property with the same name on different objects, but * with different value ranges, then it must create property for each one. An * example would be rotation of &drm_plane, when e.g. the primary plane cannot * be rotated. But if both the name and the value range match, then the same * property structure can be instantiated multiple times for the same object. * Userspace must be able to cope with this and cannot assume that the same * symbolic property will have the same modeset object ID on all modeset * objects. * * Properties are created by one of the special functions, as explained in * detail in the @flags structure member. * * To actually expose a property it must be attached to each object using * drm_object_attach_property(). Currently properties can only be attached to * &drm_connector, &drm_crtc and &drm_plane. * * Properties are also used as the generic metadatatransport for the atomic * IOCTL. Everything that was set directly in structures in the legacy modeset * IOCTLs (like the plane source or destination windows, or e.g. the links to * the CRTC) is exposed as a property with the DRM_MODE_PROP_ATOMIC flag set. */ struct drm_property { /** * @head: per-device list of properties, for cleanup. */ struct list_head head; /** * @base: base KMS object */ struct drm_mode_object base; /** * @flags: * * Property flags and type. A property needs to be one of the following * types: * * DRM_MODE_PROP_RANGE * Range properties report their minimum and maximum admissible unsigned values. * The KMS core verifies that values set by application fit in that * range. The range is unsigned. Range properties are created using * drm_property_create_range(). * * DRM_MODE_PROP_SIGNED_RANGE * Range properties report their minimum and maximum admissible unsigned values. * The KMS core verifies that values set by application fit in that * range. The range is signed. Range properties are created using * drm_property_create_signed_range(). * * DRM_MODE_PROP_ENUM * Enumerated properties take a numerical value that ranges from 0 to * the number of enumerated values defined by the property minus one, * and associate a free-formed string name to each value. Applications * can retrieve the list of defined value-name pairs and use the * numerical value to get and set property instance values. Enum * properties are created using drm_property_create_enum(). * * DRM_MODE_PROP_BITMASK * Bitmask properties are enumeration properties that additionally * restrict all enumerated values to the 0..63 range. Bitmask property * instance values combine one or more of the enumerated bits defined * by the property. Bitmask properties are created using * drm_property_create_bitmask(). * * DRM_MODE_PROP_OBJECT * Object properties are used to link modeset objects. This is used * extensively in the atomic support to create the display pipeline, * by linking &drm_framebuffer to &drm_plane, &drm_plane to * &drm_crtc and &drm_connector to &drm_crtc. An object property can * only link to a specific type of &drm_mode_object, this limit is * enforced by the core. Object properties are created using * drm_property_create_object(). * * Object properties work like blob properties, but in a more * general fashion. They are limited to atomic drivers and must have * the DRM_MODE_PROP_ATOMIC flag set. * * DRM_MODE_PROP_BLOB * Blob properties store a binary blob without any format restriction. 
* The binary blobs are created as KMS standalone objects, and blob * property instance values store the ID of their associated blob * object. Blob properties are created by calling * drm_property_create() with DRM_MODE_PROP_BLOB as the type. * * Actual blob objects to contain blob data are created using * drm_property_create_blob(), or through the corresponding IOCTL. * * Besides the built-in limit to only accept blob objects blob * properties work exactly like object properties. The only reasons * blob properties exist is backwards compatibility with existing * userspace. * * In addition a property can have any combination of the below flags: * * DRM_MODE_PROP_ATOMIC * Set for properties which encode atomic modeset state. Such * properties are not exposed to legacy userspace. * * DRM_MODE_PROP_IMMUTABLE * Set for properties whose values cannot be changed by * userspace. The kernel is allowed to update the value of these * properties. This is generally used to expose probe state to * userspace, e.g. the EDID, or the connector path property on DP * MST sinks. Kernel can update the value of an immutable property * by calling drm_object_property_set_value(). */ uint32_t flags; /** * @name: symbolic name of the properties */ char name[DRM_PROP_NAME_LEN]; /** * @num_values: size of the @values array. */ uint32_t num_values; /** * @values: * * Array with limits and values for the property. The * interpretation of these limits is dependent upon the type per @flags. */ uint64_t *values; /** * @dev: DRM device */ struct drm_device *dev; /** * @enum_list: * * List of &drm_prop_enum_list structures with the symbolic names for * enum and bitmask values. */ struct list_head enum_list; }; /** * struct drm_property_blob - Blob data for &drm_property * @base: base KMS object * @dev: DRM device * @head_global: entry on the global blob list in * &drm_mode_config.property_blob_list. * @head_file: entry on the per-file blob list in &drm_file.blobs list. * @length: size of the blob in bytes, invariant over the lifetime of the object * @data: actual data, embedded at the end of this structure * * Blobs are used to store bigger values than what fits directly into the 64 * bits available for a &drm_property. * * Blobs are reference counted using drm_property_blob_get() and * drm_property_blob_put(). They are created using drm_property_create_blob(). */ struct drm_property_blob { struct drm_mode_object base; struct drm_device *dev; struct list_head head_global; struct list_head head_file; size_t length; void *data; }; struct drm_prop_enum_list { int type; const char *name; }; #define obj_to_property(x) container_of(x, struct drm_property, base) #define obj_to_blob(x) container_of(x, struct drm_property_blob, base) /** * drm_property_type_is - check the type of a property * @property: property to check * @type: property type to compare with * * This is a helper function becauase the uapi encoding of property types is * a bit special for historical reasons. */ static inline bool drm_property_type_is(struct drm_property *property, uint32_t type) { /* instanceof for props.. 
handles extended type vs original types: */ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type; return property->flags & type; } struct drm_property *drm_property_create(struct drm_device *dev, u32 flags, const char *name, int num_values); struct drm_property *drm_property_create_enum(struct drm_device *dev, u32 flags, const char *name, const struct drm_prop_enum_list *props, int num_values); struct drm_property *drm_property_create_bitmask(struct drm_device *dev, u32 flags, const char *name, const struct drm_prop_enum_list *props, int num_props, uint64_t supported_bits); struct drm_property *drm_property_create_range(struct drm_device *dev, u32 flags, const char *name, uint64_t min, uint64_t max); struct drm_property *drm_property_create_signed_range(struct drm_device *dev, u32 flags, const char *name, int64_t min, int64_t max); struct drm_property *drm_property_create_object(struct drm_device *dev, u32 flags, const char *name, uint32_t type); struct drm_property *drm_property_create_bool(struct drm_device *dev, u32 flags, const char *name); int drm_property_add_enum(struct drm_property *property, uint64_t value, const char *name); void drm_property_destroy(struct drm_device *dev, struct drm_property *property); struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, size_t length, const void *data); struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev, uint32_t id); int drm_property_replace_blob_from_id(struct drm_device *dev, struct drm_property_blob **blob, uint64_t blob_id, ssize_t expected_size, ssize_t expected_elem_size, bool *replaced); int drm_property_replace_global_blob(struct drm_device *dev, struct drm_property_blob **replace, size_t length, const void *data, struct drm_mode_object *obj_holds_id, struct drm_property *prop_holds_id); bool drm_property_replace_blob(struct drm_property_blob **blob, struct drm_property_blob *new_blob); struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob); void drm_property_blob_put(struct drm_property_blob *blob); /** * drm_property_find - find property object * @dev: DRM device * @file_priv: drm file to check for lease against. * @id: property object id * * This function looks up the property object specified by id and returns it. */ static inline struct drm_property *drm_property_find(struct drm_device *dev, struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PROPERTY); return mo ? obj_to_property(mo) : NULL; } #endif
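As a hedged sketch of how a driver might consume the declarations in this header: create a range property and attach it to a plane with drm_object_attach_property() (declared in drm_mode_object.h). The property name "demo-alpha", the function name and the plane pointer are hypothetical; a real driver would also pick appropriate flags.

/* Hypothetical driver helper using drm_property_create_range(). */
#include <linux/errno.h>
#include <drm/drm_device.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_plane.h>
#include <drm/drm_property.h>

static int demo_attach_alpha_prop(struct drm_device *dev,
				  struct drm_plane *plane)
{
	struct drm_property *prop;

	/* arguments: dev, flags, name, min, max (see declaration above) */
	prop = drm_property_create_range(dev, 0, "demo-alpha", 0, 255);
	if (!prop)
		return -ENOMEM;

	/* expose it on the plane with an initial value of 255 (opaque) */
	drm_object_attach_property(&plane->base, prop, 255);
	return 0;
}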
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KVM_CLOCK_H
#define _ASM_X86_KVM_CLOCK_H

#include <linux/percpu.h>

DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);

static __always_inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
	return &this_cpu_read(hv_clock_per_cpu)->pvti;
}

static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
{
	return this_cpu_read(hv_clock_per_cpu);
}

#endif /* _ASM_X86_KVM_CLOCK_H */
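A hedged illustration of a caller of these helpers: fetch the per-CPU pvclock area and read its version field. The function name demo_read_pvti_version() is made up, and the NULL check assumes the per-CPU slot may not yet be populated by kvmclock.

/* Hypothetical consumer of this_cpu_hvclock(). */
#include <linux/types.h>
#include <asm/kvmclock.h>
#include <asm/pvclock.h>

static u32 demo_read_pvti_version(void)
{
	struct pvclock_vsyscall_time_info *hv = this_cpu_hvclock();

	/* the slot may still be unset early during boot */
	return hv ? hv->pvti.version : 0;
}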
8 8 8 8 8 8 8 8 8 7 7 8 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 // SPDX-License-Identifier: GPL-2.0-or-later /* * Scatterlist Cryptographic API. * * Procfs information. * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> */ #include <linux/atomic.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/fips.h> #include <linux/module.h> /* for module_name() */ #include <linux/rwsem.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "internal.h" static void *c_start(struct seq_file *m, loff_t *pos) { down_read(&crypto_alg_sem); return seq_list_start(&crypto_alg_list, *pos); } static void *c_next(struct seq_file *m, void *p, loff_t *pos) { return seq_list_next(p, &crypto_alg_list, pos); } static void c_stop(struct seq_file *m, void *p) { up_read(&crypto_alg_sem); } static int c_show(struct seq_file *m, void *p) { struct crypto_alg *alg = list_entry(p, struct crypto_alg, cra_list); seq_printf(m, "name : %s\n", alg->cra_name); seq_printf(m, "driver : %s\n", alg->cra_driver_name); seq_printf(m, "module : %s\n", module_name(alg->cra_module)); seq_printf(m, "priority : %d\n", alg->cra_priority); seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt)); seq_printf(m, "selftest : %s\n", (alg->cra_flags & CRYPTO_ALG_TESTED) ? "passed" : "unknown"); seq_printf(m, "internal : %s\n", str_yes_no(alg->cra_flags & CRYPTO_ALG_INTERNAL)); if (fips_enabled) seq_printf(m, "fips : %s\n", str_no_yes(alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)); if (alg->cra_flags & CRYPTO_ALG_LARVAL) { seq_printf(m, "type : larval\n"); seq_printf(m, "flags : 0x%x\n", alg->cra_flags); goto out; } if (alg->cra_type && alg->cra_type->show) { alg->cra_type->show(m, alg); goto out; } switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_CIPHER: seq_printf(m, "type : cipher\n"); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", alg->cra_cipher.cia_min_keysize); seq_printf(m, "max keysize : %u\n", alg->cra_cipher.cia_max_keysize); break; default: seq_printf(m, "type : unknown\n"); break; } out: seq_putc(m, '\n'); return 0; } static const struct seq_operations crypto_seq_ops = { .start = c_start, .next = c_next, .stop = c_stop, .show = c_show }; void __init crypto_init_proc(void) { proc_create_seq("crypto", 0, NULL, &crypto_seq_ops); } void __exit crypto_exit_proc(void) { remove_proc_entry("crypto", NULL); }
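The seq_file handlers above render /proc/crypto as blocks of "key : value" lines, one blank-line-separated record per registered algorithm. A trivial userspace reader that prints just the first record, assuming nothing beyond the format emitted by c_show():

/* Read and print the first /proc/crypto record. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/crypto", "r");

	if (!f) {
		perror("/proc/crypto");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '\n')	/* blank line ends the first record */
			break;
		fputs(line, stdout);
	}
	fclose(f);
	return 0;
}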
31 31 31 31 31 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 // SPDX-License-Identifier: GPL-2.0 /* * PCI Express I/O Virtualization (IOV) support * Address Translation Service 1.0 * Page Request Interface added by Joerg Roedel <joerg.roedel@amd.com> * PASID support added by Joerg Roedel <joerg.roedel@amd.com> * * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com> * Copyright (C) 2011 Advanced Micro Devices, */ #include <linux/bitfield.h> #include <linux/export.h> #include <linux/pci-ats.h> #include <linux/pci.h> #include <linux/slab.h> #include "pci.h" void pci_ats_init(struct pci_dev *dev) { int pos; if (pci_ats_disabled()) return; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); if (!pos) return; dev->ats_cap = pos; } /** * pci_ats_supported - check if the device can use ATS * @dev: the PCI device * * Returns true if the device supports ATS and is allowed to use it, false * otherwise. */ bool pci_ats_supported(struct pci_dev *dev) { if (!dev->ats_cap) return false; return (dev->untrusted == 0); } EXPORT_SYMBOL_GPL(pci_ats_supported); /** * pci_prepare_ats - Setup the PS for ATS * @dev: the PCI device * @ps: the IOMMU page shift * * This must be done by the IOMMU driver on the PF before any VFs are created to * ensure that the VF can have ATS enabled. * * Returns 0 on success, or negative on failure. 
*/ int pci_prepare_ats(struct pci_dev *dev, int ps) { u16 ctrl; if (!pci_ats_supported(dev)) return -EINVAL; if (WARN_ON(dev->ats_enabled)) return -EBUSY; if (ps < PCI_ATS_MIN_STU) return -EINVAL; if (dev->is_virtfn) return 0; dev->ats_stu = ps; ctrl = PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU); pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl); return 0; } EXPORT_SYMBOL_GPL(pci_prepare_ats); /** * pci_enable_ats - enable the ATS capability * @dev: the PCI device * @ps: the IOMMU page shift * * Returns 0 on success, or negative on failure. */ int pci_enable_ats(struct pci_dev *dev, int ps) { u16 ctrl; struct pci_dev *pdev; if (!pci_ats_supported(dev)) return -EINVAL; if (WARN_ON(dev->ats_enabled)) return -EBUSY; if (ps < PCI_ATS_MIN_STU) return -EINVAL; /* * Note that enabling ATS on a VF fails unless it's already enabled * with the same STU on the PF. */ ctrl = PCI_ATS_CTRL_ENABLE; if (dev->is_virtfn) { pdev = pci_physfn(dev); if (pdev->ats_stu != ps) return -EINVAL; } else { dev->ats_stu = ps; ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU); } pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl); dev->ats_enabled = 1; return 0; } EXPORT_SYMBOL_GPL(pci_enable_ats); /** * pci_disable_ats - disable the ATS capability * @dev: the PCI device */ void pci_disable_ats(struct pci_dev *dev) { u16 ctrl; if (WARN_ON(!dev->ats_enabled)) return; pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl); ctrl &= ~PCI_ATS_CTRL_ENABLE; pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl); dev->ats_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_ats); void pci_restore_ats_state(struct pci_dev *dev) { u16 ctrl; if (!dev->ats_enabled) return; ctrl = PCI_ATS_CTRL_ENABLE; if (!dev->is_virtfn) ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU); pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl); } /** * pci_ats_queue_depth - query the ATS Invalidate Queue Depth * @dev: the PCI device * * Returns the queue depth on success, or negative on failure. * * The ATS spec uses 0 in the Invalidate Queue Depth field to * indicate that the function can accept 32 Invalidate Request. * But here we use the `real' values (i.e. 1~32) for the Queue * Depth; and 0 indicates the function shares the Queue with * other functions (doesn't exclusively own a Queue). */ int pci_ats_queue_depth(struct pci_dev *dev) { u16 cap; if (!dev->ats_cap) return -EINVAL; if (dev->is_virtfn) return 0; pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap); return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP; } /** * pci_ats_page_aligned - Return Page Aligned Request bit status. * @pdev: the PCI device * * Returns 1, if the Untranslated Addresses generated by the device * are always aligned or 0 otherwise. * * Per PCIe spec r4.0, sec 10.5.1.2, if the Page Aligned Request bit * is set, it indicates the Untranslated Addresses generated by the * device are always aligned to a 4096 byte boundary. 
*/ int pci_ats_page_aligned(struct pci_dev *pdev) { u16 cap; if (!pdev->ats_cap) return 0; pci_read_config_word(pdev, pdev->ats_cap + PCI_ATS_CAP, &cap); if (cap & PCI_ATS_CAP_PAGE_ALIGNED) return 1; return 0; } #ifdef CONFIG_PCI_PRI void pci_pri_init(struct pci_dev *pdev) { u16 status; pdev->pri_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (!pdev->pri_cap) return; pci_read_config_word(pdev, pdev->pri_cap + PCI_PRI_STATUS, &status); if (status & PCI_PRI_STATUS_PASID) pdev->pasid_required = 1; } /** * pci_enable_pri - Enable PRI capability * @pdev: PCI device structure * @reqs: outstanding requests * * Returns 0 on success, negative value on error */ int pci_enable_pri(struct pci_dev *pdev, u32 reqs) { u16 control, status; u32 max_requests; int pri = pdev->pri_cap; /* * VFs must not implement the PRI Capability. If their PF * implements PRI, it is shared by the VFs, so if the PF PRI is * enabled, it is also enabled for the VF. */ if (pdev->is_virtfn) { if (pci_physfn(pdev)->pri_enabled) return 0; return -EINVAL; } if (WARN_ON(pdev->pri_enabled)) return -EBUSY; if (!pri) return -EINVAL; pci_read_config_word(pdev, pri + PCI_PRI_STATUS, &status); if (!(status & PCI_PRI_STATUS_STOPPED)) return -EBUSY; pci_read_config_dword(pdev, pri + PCI_PRI_MAX_REQ, &max_requests); reqs = min(max_requests, reqs); pdev->pri_reqs_alloc = reqs; pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs); control = PCI_PRI_CTRL_ENABLE; pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control); pdev->pri_enabled = 1; return 0; } /** * pci_disable_pri - Disable PRI capability * @pdev: PCI device structure * * Only clears the enabled-bit, regardless of its former value */ void pci_disable_pri(struct pci_dev *pdev) { u16 control; int pri = pdev->pri_cap; /* VFs share the PF PRI */ if (pdev->is_virtfn) return; if (WARN_ON(!pdev->pri_enabled)) return; if (!pri) return; pci_read_config_word(pdev, pri + PCI_PRI_CTRL, &control); control &= ~PCI_PRI_CTRL_ENABLE; pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control); pdev->pri_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_pri); /** * pci_restore_pri_state - Restore PRI * @pdev: PCI device structure */ void pci_restore_pri_state(struct pci_dev *pdev) { u16 control = PCI_PRI_CTRL_ENABLE; u32 reqs = pdev->pri_reqs_alloc; int pri = pdev->pri_cap; if (pdev->is_virtfn) return; if (!pdev->pri_enabled) return; if (!pri) return; pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs); pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control); } /** * pci_reset_pri - Resets device's PRI state * @pdev: PCI device structure * * The PRI capability must be disabled before this function is called. * Returns 0 on success, negative value on error. */ int pci_reset_pri(struct pci_dev *pdev) { u16 control; int pri = pdev->pri_cap; if (pdev->is_virtfn) return 0; if (WARN_ON(pdev->pri_enabled)) return -EBUSY; if (!pri) return -EINVAL; control = PCI_PRI_CTRL_RESET; pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control); return 0; } /** * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit * status. * @pdev: PCI device structure * * Returns 1 if PASID is required in PRG Response Message, 0 otherwise. */ int pci_prg_resp_pasid_required(struct pci_dev *pdev) { if (pdev->is_virtfn) pdev = pci_physfn(pdev); return pdev->pasid_required; } /** * pci_pri_supported - Check if PRI is supported. * @pdev: PCI device structure * * Returns true if PRI capability is present, false otherwise. 
*/ bool pci_pri_supported(struct pci_dev *pdev) { /* VFs share the PF PRI */ if (pci_physfn(pdev)->pri_cap) return true; return false; } EXPORT_SYMBOL_GPL(pci_pri_supported); #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID void pci_pasid_init(struct pci_dev *pdev) { pdev->pasid_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); } /** * pci_enable_pasid - Enable the PASID capability * @pdev: PCI device structure * @features: Features to enable * * Returns 0 on success, negative value on error. This function checks * whether the features are actually supported by the device and returns * an error if not. */ int pci_enable_pasid(struct pci_dev *pdev, int features) { u16 control, supported; int pasid = pdev->pasid_cap; /* * VFs must not implement the PASID Capability, but if a PF * supports PASID, its VFs share the PF PASID configuration. */ if (pdev->is_virtfn) { if (pci_physfn(pdev)->pasid_enabled) return 0; return -EINVAL; } if (WARN_ON(pdev->pasid_enabled)) return -EBUSY; if (!pdev->eetlp_prefix_max && !pdev->pasid_no_tlp) return -EINVAL; if (!pasid) return -EINVAL; if (!pci_acs_path_enabled(pdev, NULL, PCI_ACS_RR | PCI_ACS_UF)) return -EINVAL; pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported); supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; /* User wants to enable anything unsupported? */ if ((supported & features) != features) return -EINVAL; control = PCI_PASID_CTRL_ENABLE | features; pdev->pasid_features = features; pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control); pdev->pasid_enabled = 1; return 0; } EXPORT_SYMBOL_GPL(pci_enable_pasid); /** * pci_disable_pasid - Disable the PASID capability * @pdev: PCI device structure */ void pci_disable_pasid(struct pci_dev *pdev) { u16 control = 0; int pasid = pdev->pasid_cap; /* VFs share the PF PASID configuration */ if (pdev->is_virtfn) return; if (WARN_ON(!pdev->pasid_enabled)) return; if (!pasid) return; pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control); pdev->pasid_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_pasid); /** * pci_restore_pasid_state - Restore PASID capabilities * @pdev: PCI device structure */ void pci_restore_pasid_state(struct pci_dev *pdev) { u16 control; int pasid = pdev->pasid_cap; if (pdev->is_virtfn) return; if (!pdev->pasid_enabled) return; if (!pasid) return; control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features; pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control); } /** * pci_pasid_features - Check which PASID features are supported * @pdev: PCI device structure * * Return a negative value when no PASID capability is present. * Otherwise return a bitmask with supported features. Current * features reported are: * PCI_PASID_CAP_EXEC - Execute permission supported * PCI_PASID_CAP_PRIV - Privileged mode supported */ int pci_pasid_features(struct pci_dev *pdev) { u16 supported; int pasid; if (pdev->is_virtfn) pdev = pci_physfn(pdev); pasid = pdev->pasid_cap; if (!pasid) return -EINVAL; pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported); supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; return supported; } EXPORT_SYMBOL_GPL(pci_pasid_features); /** * pci_max_pasids - Get maximum number of PASIDs supported by device * @pdev: PCI device structure * * Returns negative value when PASID capability is not present. * Otherwise it returns the number of supported PASIDs. 
*/ int pci_max_pasids(struct pci_dev *pdev) { u16 supported; int pasid; if (pdev->is_virtfn) pdev = pci_physfn(pdev); pasid = pdev->pasid_cap; if (!pasid) return -EINVAL; pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported); return (1 << FIELD_GET(PCI_PASID_CAP_WIDTH, supported)); } EXPORT_SYMBOL_GPL(pci_max_pasids); /** * pci_pasid_status - Check the PASID status * @pdev: PCI device structure * * Returns a negative value when no PASID capability is present. * Otherwise the value of the control register is returned. * Status reported are: * * PCI_PASID_CTRL_ENABLE - PASID enabled * PCI_PASID_CTRL_EXEC - Execute permission enabled * PCI_PASID_CTRL_PRIV - Privileged mode enabled */ int pci_pasid_status(struct pci_dev *pdev) { int pasid; u16 ctrl; if (pdev->is_virtfn) pdev = pci_physfn(pdev); pasid = pdev->pasid_cap; if (!pasid) return -EINVAL; pci_read_config_word(pdev, pasid + PCI_PASID_CTRL, &ctrl); ctrl &= PCI_PASID_CTRL_ENABLE | PCI_PASID_CTRL_EXEC | PCI_PASID_CTRL_PRIV; return ctrl; } EXPORT_SYMBOL_GPL(pci_pasid_status); #endif /* CONFIG_PCI_PASID */
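The PASID helpers above are consumed by IOMMU drivers. The following is a minimal sketch, assuming an IOMMU-driver context; example_setup_pasid() is a hypothetical name and error handling is abbreviated, so treat it as an illustration of the call order rather than canonical usage.

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Hypothetical helper: query what the device supports, then enable PASID. */
static int example_setup_pasid(struct pci_dev *pdev)
{
	int features, ret;

	features = pci_pasid_features(pdev);	/* EXEC/PRIV bits, or -EINVAL */
	if (features < 0)
		return features;

	ret = pci_enable_pasid(pdev, features);	/* must be a supported subset */
	if (ret)
		return ret;

	dev_info(&pdev->dev, "PASID enabled, up to %d PASIDs\n",
		 pci_max_pasids(pdev));
	return 0;
}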
// SPDX-License-Identifier: GPL-2.0-only
/* Tom Kelly's Scalable TCP
 *
 * See http://www.deneholme.net/tom/scalable/
 *
 * John Heffner <jheffner@sc.edu>
 */

#include <linux/module.h>
#include <net/tcp.h>

/* These factors derived from the recommended values in the aer:
 * .01 and 7/8.
 */
#define TCP_SCALABLE_AI_CNT	100U
#define TCP_SCALABLE_MD_SCALE	3

static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
			  acked);
}

static u32 tcp_scalable_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp) - (tcp_snd_cwnd(tp)>>TCP_SCALABLE_MD_SCALE), 2U);
}

static struct tcp_congestion_ops tcp_scalable __read_mostly = {
	.ssthresh	= tcp_scalable_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_scalable_cong_avoid,
	.owner		= THIS_MODULE,
	.name		= "scalable",
};

static int __init tcp_scalable_register(void)
{
	return tcp_register_congestion_control(&tcp_scalable);
}

static void __exit tcp_scalable_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_scalable);
}

module_init(tcp_scalable_register);
module_exit(tcp_scalable_unregister);

MODULE_AUTHOR("John Heffner");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Scalable TCP");
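To make the two constants concrete: a congestion window of 200 segments backs off to 200 - 200/8 = 175, and additive increase adds roughly one segment per min(cwnd, 100) ACKed segments. A standalone sketch of that arithmetic (userspace, not kernel code):

#include <stdio.h>

#define TCP_SCALABLE_AI_CNT	100U
#define TCP_SCALABLE_MD_SCALE	3

int main(void)
{
	unsigned int cwnd = 200;	/* example congestion window, in segments */

	/* Same arithmetic as tcp_scalable_ssthresh() above (minus the 2U floor). */
	unsigned int ssthresh = cwnd - (cwnd >> TCP_SCALABLE_MD_SCALE);

	printf("cwnd=%u -> ssthresh=%u (multiplicative decrease of 1/8)\n",
	       cwnd, ssthresh);
	printf("additive increase: ~1 segment per %u ACKed segments\n",
	       cwnd < TCP_SCALABLE_AI_CNT ? cwnd : TCP_SCALABLE_AI_CNT);
	return 0;
}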
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OCB mode implementation
 *
 * Copyright: (c) 2014 Czech Technical University in Prague
 *            (c) 2014 Volkswagen Group Research
 * Copyright (C) 2022 - 2024 Intel Corporation
 * Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
 * Funded by: Volkswagen Group Research
 */

#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
#include <linux/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"

#define IEEE80211_OCB_HOUSEKEEPING_INTERVAL	(60 * HZ)
#define IEEE80211_OCB_PEER_INACTIVITY_LIMIT	(240 * HZ)
#define IEEE80211_OCB_MAX_STA_ENTRIES		128

/**
 * enum ocb_deferred_task_flags - mac80211 OCB deferred tasks
 * @OCB_WORK_HOUSEKEEPING: run the periodic OCB housekeeping tasks
 *
 * These flags are used in @wrkq_flags field of &struct ieee80211_if_ocb
 */
enum ocb_deferred_task_flags {
	OCB_WORK_HOUSEKEEPING,
};

void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
			     const u8 *bssid, const u8 *addr,
			     u32 supp_rates)
{
	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct ieee80211_supported_band *sband;
	struct sta_info *sta;
	int band;

	/* XXX: Consider removing the least recently used entry and
	 * allow new one to be added.
*/ if (local->num_sta >= IEEE80211_OCB_MAX_STA_ENTRIES) { net_info_ratelimited("%s: No room for a new OCB STA entry %pM\n", sdata->name, addr); return; } ocb_dbg(sdata, "Adding new OCB station %pM\n", addr); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) { rcu_read_unlock(); return; } band = chanctx_conf->def.chan->band; rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); if (!sta) return; /* Add only mandatory rates for now */ sband = local->hw.wiphy->bands[band]; sta->sta.deflink.supp_rates[band] = ieee80211_mandatory_rates(sband); spin_lock(&ifocb->incomplete_lock); list_add(&sta->list, &ifocb->incomplete_stations); spin_unlock(&ifocb->incomplete_lock); wiphy_work_queue(local->hw.wiphy, &sdata->work); } static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta) __acquires(RCU) { struct ieee80211_sub_if_data *sdata = sta->sdata; u8 addr[ETH_ALEN]; memcpy(addr, sta->sta.addr, ETH_ALEN); ocb_dbg(sdata, "Adding new IBSS station %pM (dev=%s)\n", addr, sdata->name); sta_info_move_state(sta, IEEE80211_STA_AUTH); sta_info_move_state(sta, IEEE80211_STA_ASSOC); sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED); rate_control_rate_init(&sta->deflink); /* If it fails, maybe we raced another insertion? */ if (sta_info_insert_rcu(sta)) return sta_info_get(sdata, addr); return sta; } static void ieee80211_ocb_housekeeping(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; ocb_dbg(sdata, "Running ocb housekeeping\n"); ieee80211_sta_expire(sdata, IEEE80211_OCB_PEER_INACTIVITY_LIMIT); mod_timer(&ifocb->housekeeping_timer, round_jiffies(jiffies + IEEE80211_OCB_HOUSEKEEPING_INTERVAL)); } void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (ifocb->joined != true) return; spin_lock_bh(&ifocb->incomplete_lock); while (!list_empty(&ifocb->incomplete_stations)) { sta = list_first_entry(&ifocb->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifocb->incomplete_lock); ieee80211_ocb_finish_sta(sta); rcu_read_unlock(); spin_lock_bh(&ifocb->incomplete_lock); } spin_unlock_bh(&ifocb->incomplete_lock); if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags)) ieee80211_ocb_housekeeping(sdata); } static void ieee80211_ocb_housekeeping_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.ocb.housekeeping_timer); struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); wiphy_work_queue(local->hw.wiphy, &sdata->work); } void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; timer_setup(&ifocb->housekeeping_timer, ieee80211_ocb_housekeeping_timer, 0); INIT_LIST_HEAD(&ifocb->incomplete_stations); spin_lock_init(&ifocb->incomplete_lock); } int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, struct ocb_setup *setup) { struct ieee80211_chan_req chanreq = { .oper = setup->chandef }; struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; u64 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID; int err; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (ifocb->joined == true) return -EINVAL; sdata->deflink.operating_11g_mode = true; sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; 
sdata->deflink.needed_rx_chains = sdata->local->rx_chains; err = ieee80211_link_use_channel(&sdata->deflink, &chanreq, IEEE80211_CHANCTX_SHARED); if (err) return err; ieee80211_bss_info_change_notify(sdata, changed); ifocb->joined = true; set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); wiphy_work_queue(local->hw.wiphy, &sdata->work); netif_carrier_on(sdata->dev); return 0; } int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; struct ieee80211_local *local = sdata->local; struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); ifocb->joined = false; sta_info_flush(sdata, -1); spin_lock_bh(&ifocb->incomplete_lock); while (!list_empty(&ifocb->incomplete_stations)) { sta = list_first_entry(&ifocb->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifocb->incomplete_lock); sta_info_free(local, sta); spin_lock_bh(&ifocb->incomplete_lock); } spin_unlock_bh(&ifocb->incomplete_lock); netif_carrier_off(sdata->dev); clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB); ieee80211_link_release_channel(&sdata->deflink); skb_queue_purge(&sdata->skb_queue); timer_delete_sync(&sdata->u.ocb.housekeeping_timer); /* If the timer fired while we waited for it, it will have * requeued the work. Now the work will be running again * but will not rearm the timer again because it checks * whether we are connected to the network or not -- at this * point we shouldn't be anymore. */ return 0; }
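The closing comment above describes the housekeeping arrangement: the timer only sets a flag and queues the interface work, and the work handler consumes the flag and re-arms the timer. A schematic, self-contained sketch of that pattern follows; it is a hypothetical module using the generic timer/workqueue API rather than mac80211's wiphy work, so the example_* names are illustrative only.

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#define EXAMPLE_WORK_HOUSEKEEPING	0	/* bit index, like OCB_WORK_HOUSEKEEPING */

static unsigned long example_flags;
static struct timer_list example_timer;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* Consume the flag, do the periodic task, then re-arm the timer. */
	if (test_and_clear_bit(EXAMPLE_WORK_HOUSEKEEPING, &example_flags))
		mod_timer(&example_timer, round_jiffies(jiffies + 60 * HZ));
}

static void example_timer_fn(struct timer_list *t)
{
	/* Do no real work in timer context; flag it and kick the work item. */
	set_bit(EXAMPLE_WORK_HOUSEKEEPING, &example_flags);
	schedule_work(&example_work);	/* ocb.c uses wiphy_work_queue() here */
}

static int __init example_init(void)
{
	INIT_WORK(&example_work, example_work_fn);
	timer_setup(&example_timer, example_timer_fn, 0);
	mod_timer(&example_timer, round_jiffies(jiffies + 60 * HZ));
	return 0;
}

static void __exit example_exit(void)
{
	timer_delete_sync(&example_timer);
	cancel_work_sync(&example_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_DESCRIPTION("Deferred housekeeping pattern sketch");
MODULE_LICENSE("GPL");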
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sha256_base.h - core logic for SHA-256 implementations
 *
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#ifndef _CRYPTO_SHA256_BASE_H
#define _CRYPTO_SHA256_BASE_H

#include <asm/byteorder.h>
#include <linux/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/string.h>
#include <linux/types.h>

typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
			       int blocks);

static inline int sha224_base_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	sha224_init(sctx);
	return 0;
}

static inline int sha256_base_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	sha256_init(sctx);
	return 0;
}

static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
					    const u8 *data,
					    unsigned int len,
					    sha256_block_fn *block_fn)
{
	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;

	sctx->count += len;

	if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
		int blocks;

		if (partial) {
			int p = SHA256_BLOCK_SIZE - partial;

			memcpy(sctx->buf + partial, data, p);
			data += p;
			len -= p;

			block_fn(sctx, sctx->buf, 1);
		}

		blocks = len / SHA256_BLOCK_SIZE;
		len %= SHA256_BLOCK_SIZE;

		if (blocks) {
			block_fn(sctx, data, blocks);
			data += blocks * SHA256_BLOCK_SIZE;
		}
		partial = 0;
	}
	if (len)
		memcpy(sctx->buf + partial, data, len);

	return 0;
}

static inline int sha256_base_do_update(struct shash_desc *desc,
					const u8 *data,
					unsigned int len,
					sha256_block_fn *block_fn)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	return lib_sha256_base_do_update(sctx, data, len, block_fn);
}

static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
					      sha256_block_fn *block_fn)
{
	const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
	__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;

	sctx->buf[partial++] = 0x80;
	if (partial > bit_offset) {
		memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
		partial = 0;

		block_fn(sctx, sctx->buf, 1);
	}

	memset(sctx->buf + partial, 0x0, bit_offset - partial);
	*bits = cpu_to_be64(sctx->count << 3);
	block_fn(sctx, sctx->buf, 1);

	return 0;
}

static inline int sha256_base_do_finalize(struct shash_desc *desc,
					  sha256_block_fn *block_fn)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	return lib_sha256_base_do_finalize(sctx, block_fn);
}

static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
					 unsigned int digest_size)
{
	__be32 *digest = (__be32 *)out;
	int i;

	for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
		put_unaligned_be32(sctx->state[i], digest++);

	memzero_explicit(sctx, sizeof(*sctx));
	return 0;
}

static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
{
	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
	struct sha256_state *sctx = shash_desc_ctx(desc);

	return lib_sha256_base_finish(sctx, out, digest_size);
}

#endif /* _CRYPTO_SHA256_BASE_H */
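These helpers are glue for architecture-specific SHA-256 drivers: the driver supplies only a sha256_block_fn that compresses whole blocks, and the header handles buffering, padding, and the length word. A minimal sketch, assuming a hypothetical driver (the example_* names are not real symbols, and the block function is left as a stub where the accelerated rounds would run):

#include <crypto/internal/hash.h>
#include <crypto/sha256_base.h>

/* Hypothetical accelerated compression step; a real driver would run the
 * SHA-256 rounds (often in assembly) over 'blocks' consecutive blocks here.
 */
static void example_sha256_blocks(struct sha256_state *sst, u8 const *src,
				  int blocks)
{
}

static int example_sha256_update(struct shash_desc *desc, const u8 *data,
				 unsigned int len)
{
	return sha256_base_do_update(desc, data, len, example_sha256_blocks);
}

static int example_sha256_final(struct shash_desc *desc, u8 *out)
{
	sha256_base_do_finalize(desc, example_sha256_blocks);
	return sha256_base_finish(desc, out);
}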
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP
#define NR_BG_LOCKS	(4 << ilog2(NR_CPUS < 32 ? NR_CPUS : 32))
#else
#define NR_BG_LOCKS	1
#endif

struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};

static inline void bgl_lock_init(struct blockgroup_lock *bgl)
{
	int i;

	for (i = 0; i < NR_BG_LOCKS; i++)
		spin_lock_init(&bgl->locks[i].lock);
}

static inline spinlock_t *
bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
{
	return &bgl->locks[block_group & (NR_BG_LOCKS-1)].lock;
}

#endif
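Usage is two calls: bgl_lock_init() once at setup, then bgl_lock_ptr() to hash a group number onto one of the NR_BG_LOCKS spinlocks. A small sketch, assuming a hypothetical filesystem-private structure (the example_* names are illustrative, not taken from any real filesystem):

#include <linux/blockgroup_lock.h>

struct example_sb_info {
	struct blockgroup_lock s_blockgroup_lock;
};

static void example_fill_super(struct example_sb_info *sbi)
{
	bgl_lock_init(&sbi->s_blockgroup_lock);
}

static void example_touch_group(struct example_sb_info *sbi,
				unsigned int block_group)
{
	/* Groups that hash to the same slot share a spinlock. */
	spinlock_t *lock = bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group);

	spin_lock(lock);
	/* ... modify the group's free-block/inode counters ... */
	spin_unlock(lock);
}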
// SPDX-License-Identifier: MIT /* *
vgaarb.c: Implements VGA arbitration. For details refer to * Documentation/gpu/vgaarbiter.rst * * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> */ #define pr_fmt(fmt) "vgaarb: " fmt #define vgaarb_dbg(dev, fmt, arg...) dev_dbg(dev, "vgaarb: " fmt, ##arg) #define vgaarb_info(dev, fmt, arg...) dev_info(dev, "vgaarb: " fmt, ##arg) #define vgaarb_err(dev, fmt, arg...) dev_err(dev, "vgaarb: " fmt, ##arg) #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/poll.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/screen_info.h> #include <linux/vt.h> #include <linux/console.h> #include <linux/acpi.h> #include <linux/uaccess.h> #include <linux/vgaarb.h> static void vga_arbiter_notify_clients(void); /* * We keep a list of all VGA devices in the system to speed * up the various operations of the arbiter */ struct vga_device { struct list_head list; struct pci_dev *pdev; unsigned int decodes; /* what it decodes */ unsigned int owns; /* what it owns */ unsigned int locks; /* what it locks */ unsigned int io_lock_cnt; /* legacy IO lock count */ unsigned int mem_lock_cnt; /* legacy MEM lock count */ unsigned int io_norm_cnt; /* normal IO count */ unsigned int mem_norm_cnt; /* normal MEM count */ bool bridge_has_one_vga; bool is_firmware_default; /* device selected by firmware */ unsigned int (*set_decode)(struct pci_dev *pdev, bool decode); }; static LIST_HEAD(vga_list); static int vga_count, vga_decode_count; static bool vga_arbiter_used; static DEFINE_SPINLOCK(vga_lock); static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue); static const char *vga_iostate_to_str(unsigned int iostate) { /* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */ iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; switch (iostate) { case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM: return "io+mem"; case VGA_RSRC_LEGACY_IO: return "io"; case VGA_RSRC_LEGACY_MEM: return "mem"; } return "none"; } static int vga_str_to_iostate(char *buf, int str_size, unsigned int *io_state) { /* * In theory, we could hand out locks on IO and MEM separately to * userspace, but this can cause deadlocks. */ if (strncmp(buf, "none", 4) == 0) { *io_state = VGA_RSRC_NONE; return 1; } /* XXX We're not checking the str_size! */ if (strncmp(buf, "io+mem", 6) == 0) goto both; else if (strncmp(buf, "io", 2) == 0) goto both; else if (strncmp(buf, "mem", 3) == 0) goto both; return 0; both: *io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; return 1; } /* This is only used as a cookie, it should not be dereferenced */ static struct pci_dev *vga_default; /* Find somebody in our list */ static struct vga_device *vgadev_find(struct pci_dev *pdev) { struct vga_device *vgadev; list_for_each_entry(vgadev, &vga_list, list) if (pdev == vgadev->pdev) return vgadev; return NULL; } /** * vga_default_device - return the default VGA device, for vgacon * * This can be defined by the platform. The default implementation is * rather dumb and will probably only work properly on single VGA card * setups and/or x86 platforms. * * If your VGA default device is not PCI, you'll have to return NULL here. * In this case, I assume it will not conflict with any PCI card. 
If this * is not true, I'll have to define two arch hooks for enabling/disabling * the VGA default device if that is possible. This may be a problem with * real _ISA_ VGA cards, in addition to a PCI one. I don't know at this * point how to deal with that card. Can their IOs be disabled at all? If * not, then I suppose it's a matter of having the proper arch hook telling * us about it, so we basically never allow anybody to succeed a vga_get(). */ struct pci_dev *vga_default_device(void) { return vga_default; } EXPORT_SYMBOL_GPL(vga_default_device); void vga_set_default_device(struct pci_dev *pdev) { if (vga_default == pdev) return; pci_dev_put(vga_default); vga_default = pci_dev_get(pdev); } /** * vga_remove_vgacon - deactivate VGA console * * Unbind and unregister vgacon in case pdev is the default VGA device. * Can be called by GPU drivers on initialization to make sure VGA register * access done by vgacon will not disturb the device. * * @pdev: PCI device. */ #if !defined(CONFIG_VGA_CONSOLE) int vga_remove_vgacon(struct pci_dev *pdev) { return 0; } #elif !defined(CONFIG_DUMMY_CONSOLE) int vga_remove_vgacon(struct pci_dev *pdev) { return -ENODEV; } #else int vga_remove_vgacon(struct pci_dev *pdev) { int ret = 0; if (pdev != vga_default) return 0; vgaarb_info(&pdev->dev, "deactivate vga console\n"); console_lock(); if (con_is_bound(&vga_con)) ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); if (ret == 0) { ret = do_unregister_con_driver(&vga_con); /* Ignore "already unregistered". */ if (ret == -ENODEV) ret = 0; } console_unlock(); return ret; } #endif EXPORT_SYMBOL(vga_remove_vgacon); /* * If we don't ever use VGA arbitration, we should avoid turning off * anything anywhere due to old X servers getting confused about the boot * device not being VGA. */ static void vga_check_first_use(void) { /* * Inform all GPUs in the system that VGA arbitration has occurred * so they can disable resources if possible. */ if (!vga_arbiter_used) { vga_arbiter_used = true; vga_arbiter_notify_clients(); } } static struct vga_device *__vga_tryget(struct vga_device *vgadev, unsigned int rsrc) { struct device *dev = &vgadev->pdev->dev; unsigned int wants, legacy_wants, match; struct vga_device *conflict; unsigned int pci_bits; u32 flags = 0; /* * Account for "normal" resources to lock. If we decode the legacy, * counterpart, we need to request it as well */ if ((rsrc & VGA_RSRC_NORMAL_IO) && (vgadev->decodes & VGA_RSRC_LEGACY_IO)) rsrc |= VGA_RSRC_LEGACY_IO; if ((rsrc & VGA_RSRC_NORMAL_MEM) && (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) rsrc |= VGA_RSRC_LEGACY_MEM; vgaarb_dbg(dev, "%s: %d\n", __func__, rsrc); vgaarb_dbg(dev, "%s: owns: %d\n", __func__, vgadev->owns); /* Check what resources we need to acquire */ wants = rsrc & ~vgadev->owns; /* We already own everything, just mark locked & bye bye */ if (wants == 0) goto lock_them; /* * We don't need to request a legacy resource, we just enable * appropriate decoding and go. */ legacy_wants = wants & VGA_RSRC_LEGACY_MASK; if (legacy_wants == 0) goto enable_them; /* Ok, we don't, let's find out who we need to kick off */ list_for_each_entry(conflict, &vga_list, list) { unsigned int lwants = legacy_wants; unsigned int change_bridge = 0; /* Don't conflict with myself */ if (vgadev == conflict) continue; /* * We have a possible conflict. Before we go further, we must * check if we sit on the same bus as the conflicting device. 
* If we don't, then we must tie both IO and MEM resources * together since there is only a single bit controlling * VGA forwarding on P2P bridges. */ if (vgadev->pdev->bus != conflict->pdev->bus) { change_bridge = 1; lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; } /* * Check if the guy has a lock on the resource. If he does, * return the conflicting entry. */ if (conflict->locks & lwants) return conflict; /* * Ok, now check if it owns the resource we want. We can * lock resources that are not decoded; therefore a device * can own resources it doesn't decode. */ match = lwants & conflict->owns; if (!match) continue; /* * Looks like he doesn't have a lock, we can steal them * from him. */ flags = 0; pci_bits = 0; /* * If we can't control legacy resources via the bridge, we * also need to disable normal decoding. */ if (!conflict->bridge_has_one_vga) { if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM) pci_bits |= PCI_COMMAND_MEMORY; if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO) pci_bits |= PCI_COMMAND_IO; if (pci_bits) flags |= PCI_VGA_STATE_CHANGE_DECODES; } if (change_bridge) flags |= PCI_VGA_STATE_CHANGE_BRIDGE; pci_set_vga_state(conflict->pdev, false, pci_bits, flags); conflict->owns &= ~match; /* If we disabled normal decoding, reflect it in owns */ if (pci_bits & PCI_COMMAND_MEMORY) conflict->owns &= ~VGA_RSRC_NORMAL_MEM; if (pci_bits & PCI_COMMAND_IO) conflict->owns &= ~VGA_RSRC_NORMAL_IO; } enable_them: /* * Ok, we got it, everybody conflicting has been disabled, let's * enable us. Mark any bits in "owns" regardless of whether we * decoded them. We can lock resources we don't decode, therefore * we must track them via "owns". */ flags = 0; pci_bits = 0; if (!vgadev->bridge_has_one_vga) { flags |= PCI_VGA_STATE_CHANGE_DECODES; if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) pci_bits |= PCI_COMMAND_MEMORY; if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) pci_bits |= PCI_COMMAND_IO; } if (wants & VGA_RSRC_LEGACY_MASK) flags |= PCI_VGA_STATE_CHANGE_BRIDGE; pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); vgadev->owns |= wants; lock_them: vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); if (rsrc & VGA_RSRC_LEGACY_IO) vgadev->io_lock_cnt++; if (rsrc & VGA_RSRC_LEGACY_MEM) vgadev->mem_lock_cnt++; if (rsrc & VGA_RSRC_NORMAL_IO) vgadev->io_norm_cnt++; if (rsrc & VGA_RSRC_NORMAL_MEM) vgadev->mem_norm_cnt++; return NULL; } static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) { struct device *dev = &vgadev->pdev->dev; unsigned int old_locks = vgadev->locks; vgaarb_dbg(dev, "%s\n", __func__); /* * Update our counters and account for equivalent legacy resources * if we decode them. */ if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) { vgadev->io_norm_cnt--; if (vgadev->decodes & VGA_RSRC_LEGACY_IO) rsrc |= VGA_RSRC_LEGACY_IO; } if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) { vgadev->mem_norm_cnt--; if (vgadev->decodes & VGA_RSRC_LEGACY_MEM) rsrc |= VGA_RSRC_LEGACY_MEM; } if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0) vgadev->io_lock_cnt--; if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0) vgadev->mem_lock_cnt--; /* * Just clear lock bits, we do lazy operations so we don't really * have to bother about anything else at this point. */ if (vgadev->io_lock_cnt == 0) vgadev->locks &= ~VGA_RSRC_LEGACY_IO; if (vgadev->mem_lock_cnt == 0) vgadev->locks &= ~VGA_RSRC_LEGACY_MEM; /* * Kick the wait queue in case somebody was waiting if we actually * released something. 
*/ if (old_locks != vgadev->locks) wake_up_all(&vga_wait_queue); } /** * vga_get - acquire & lock VGA resources * @pdev: PCI device of the VGA card or NULL for the system default * @rsrc: bit mask of resources to acquire and lock * @interruptible: blocking should be interruptible by signals ? * * Acquire VGA resources for the given card and mark those resources * locked. If the resources requested are "normal" (and not legacy) * resources, the arbiter will first check whether the card is doing legacy * decoding for that type of resource. If yes, the lock is "converted" into * a legacy resource lock. * * The arbiter will first look for all VGA cards that might conflict and disable * their IOs and/or Memory access, including VGA forwarding on P2P bridges if * necessary, so that the requested resources can be used. Then, the card is * marked as locking these resources and the IO and/or Memory accesses are * enabled on the card (including VGA forwarding on parent P2P bridges if any). * * This function will block if some conflicting card is already locking one of * the required resources (or any resource on a different bus segment, since P2P * bridges don't differentiate VGA memory and IO afaik). You can indicate * whether this blocking should be interruptible by a signal (for userland * interface) or not. * * Must not be called at interrupt time or in atomic context. If the card * already owns the resources, the function succeeds. Nested calls are * supported (a per-resource counter is maintained) * * On success, release the VGA resource again with vga_put(). * * Returns: * * 0 on success, negative error code on failure. */ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { struct vga_device *vgadev, *conflict; unsigned long flags; wait_queue_entry_t wait; int rc = 0; vga_check_first_use(); /* The caller should check for this, but let's be sure */ if (pdev == NULL) pdev = vga_default_device(); if (pdev == NULL) return 0; for (;;) { spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) { spin_unlock_irqrestore(&vga_lock, flags); rc = -ENODEV; break; } conflict = __vga_tryget(vgadev, rsrc); spin_unlock_irqrestore(&vga_lock, flags); if (conflict == NULL) break; /* * We have a conflict; we wait until somebody kicks the * work queue. Currently we have one work queue that we * kick each time some resources are released, but it would * be fairly easy to have a per-device one so that we only * need to attach to the conflicting device. */ init_waitqueue_entry(&wait, current); add_wait_queue(&vga_wait_queue, &wait); set_current_state(interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (interruptible && signal_pending(current)) { __set_current_state(TASK_RUNNING); remove_wait_queue(&vga_wait_queue, &wait); rc = -ERESTARTSYS; break; } schedule(); remove_wait_queue(&vga_wait_queue, &wait); } return rc; } EXPORT_SYMBOL(vga_get); /** * vga_tryget - try to acquire & lock legacy VGA resources * @pdev: PCI device of VGA card or NULL for system default * @rsrc: bit mask of resources to acquire and lock * * Perform the same operation as vga_get(), but return an error (-EBUSY) * instead of blocking if the resources are already locked by another card. * Can be called in any context. * * On success, release the VGA resource again with vga_put(). * * Returns: * * 0 on success, negative error code on failure. 
*/ static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { struct vga_device *vgadev; unsigned long flags; int rc = 0; vga_check_first_use(); /* The caller should check for this, but let's be sure */ if (pdev == NULL) pdev = vga_default_device(); if (pdev == NULL) return 0; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) { rc = -ENODEV; goto bail; } if (__vga_tryget(vgadev, rsrc)) rc = -EBUSY; bail: spin_unlock_irqrestore(&vga_lock, flags); return rc; } /** * vga_put - release lock on legacy VGA resources * @pdev: PCI device of VGA card or NULL for system default * @rsrc: bit mask of resource to release * * Release resources previously locked by vga_get() or vga_tryget(). The * resources aren't disabled right away, so that a subsequent vga_get() on * the same card will succeed immediately. Resources have a counter, so * locks are only released if the counter reaches 0. */ void vga_put(struct pci_dev *pdev, unsigned int rsrc) { struct vga_device *vgadev; unsigned long flags; /* The caller should check for this, but let's be sure */ if (pdev == NULL) pdev = vga_default_device(); if (pdev == NULL) return; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) goto bail; __vga_put(vgadev, rsrc); bail: spin_unlock_irqrestore(&vga_lock, flags); } EXPORT_SYMBOL(vga_put); static bool vga_is_firmware_default(struct pci_dev *pdev) { #if defined(CONFIG_X86) u64 base = screen_info.lfb_base; u64 size = screen_info.lfb_size; struct resource *r; u64 limit; /* Select the device owning the boot framebuffer if there is one */ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) base |= (u64)screen_info.ext_lfb_base << 32; limit = base + size; /* Does firmware framebuffer belong to us? */ pci_dev_for_each_resource(pdev, r) { if (resource_type(r) != IORESOURCE_MEM) continue; if (!r->start || !r->end) continue; if (base < r->start || limit >= r->end) continue; return true; } #endif return false; } static bool vga_arb_integrated_gpu(struct device *dev) { #if defined(CONFIG_ACPI) struct acpi_device *adev = ACPI_COMPANION(dev); return adev && !strcmp(acpi_device_hid(adev), ACPI_VIDEO_HID); #else return false; #endif } /* * Return true if vgadev is a better default VGA device than the best one * we've seen so far. */ static bool vga_is_boot_device(struct vga_device *vgadev) { struct vga_device *boot_vga = vgadev_find(vga_default_device()); struct pci_dev *pdev = vgadev->pdev; u16 cmd, boot_cmd; /* * We select the default VGA device in this order: * Firmware framebuffer (see vga_arb_select_default_device()) * Legacy VGA device (owns VGA_RSRC_LEGACY_MASK) * Non-legacy integrated device (see vga_arb_select_default_device()) * Non-legacy discrete device (see vga_arb_select_default_device()) * Other device (see vga_arb_select_default_device()) */ /* * We always prefer a firmware default device, so if we've already * found one, there's no need to consider vgadev. */ if (boot_vga && boot_vga->is_firmware_default) return false; if (vga_is_firmware_default(pdev)) { vgadev->is_firmware_default = true; return true; } /* * A legacy VGA device has MEM and IO enabled and any bridges * leading to it have PCI_BRIDGE_CTL_VGA enabled so the legacy * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], etc) are * routed to it. * * We use the first one we find, so if we've already found one, * vgadev is no better. 
*/ if (boot_vga && (boot_vga->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK) return false; if ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK) return true; /* * If we haven't found a legacy VGA device, accept a non-legacy * device. It may have either IO or MEM enabled, and bridges may * not have PCI_BRIDGE_CTL_VGA enabled, so it may not be able to * use legacy VGA resources. Prefer an integrated GPU over others. */ pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { /* * An integrated GPU overrides a previous non-legacy * device. We expect only a single integrated GPU, but if * there are more, we use the *last* because that was the * previous behavior. */ if (vga_arb_integrated_gpu(&pdev->dev)) return true; /* * We prefer the first non-legacy discrete device we find. * If we already found one, vgadev is no better. */ if (boot_vga) { pci_read_config_word(boot_vga->pdev, PCI_COMMAND, &boot_cmd); if (boot_cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) return false; } return true; } /* * Vgadev has neither IO nor MEM enabled. If we haven't found any * other VGA devices, it is the best candidate so far. */ if (!boot_vga) return true; return false; } /* * Rules for using a bridge to control a VGA descendant decoding: if a bridge * has only one VGA descendant then it can be used to control the VGA routing * for that device. It should always use the bridge closest to the device to * control it. If a bridge has a direct VGA descendant, but also have a sub- * bridge VGA descendant then we cannot use that bridge to control the direct * VGA descendant. So for every device we register, we need to iterate all * its parent bridges so we can invalidate any devices using them properly. */ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev) { struct vga_device *same_bridge_vgadev; struct pci_bus *new_bus, *bus; struct pci_dev *new_bridge, *bridge; vgadev->bridge_has_one_vga = true; if (list_empty(&vga_list)) { vgaarb_info(&vgadev->pdev->dev, "bridge control possible\n"); return; } /* Iterate the new device's bridge hierarchy */ new_bus = vgadev->pdev->bus; while (new_bus) { new_bridge = new_bus->self; /* Go through list of devices already registered */ list_for_each_entry(same_bridge_vgadev, &vga_list, list) { bus = same_bridge_vgadev->pdev->bus; bridge = bus->self; /* See if it shares a bridge with this device */ if (new_bridge == bridge) { /* * If its direct parent bridge is the same * as any bridge of this device then it can't * be used for that device. */ same_bridge_vgadev->bridge_has_one_vga = false; } /* * Now iterate the previous device's bridge hierarchy. * If the new device's parent bridge is in the other * device's hierarchy, we can't use it to control this * device. */ while (bus) { bridge = bus->self; if (bridge && bridge == vgadev->pdev->bus->self) vgadev->bridge_has_one_vga = false; bus = bus->parent; } } new_bus = new_bus->parent; } if (vgadev->bridge_has_one_vga) vgaarb_info(&vgadev->pdev->dev, "bridge control possible\n"); else vgaarb_info(&vgadev->pdev->dev, "no bridge control possible\n"); } /* * Currently, we assume that the "initial" setup of the system is not sane, * that is, we come up with conflicting devices and let the arbiter's * client decide if devices decodes legacy things or not. 
*/ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev) { struct vga_device *vgadev; unsigned long flags; struct pci_bus *bus; struct pci_dev *bridge; u16 cmd; /* Allocate structure */ vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL); if (vgadev == NULL) { vgaarb_err(&pdev->dev, "failed to allocate VGA arbiter data\n"); /* * What to do on allocation failure? For now, let's just do * nothing, I'm not sure there is anything saner to be done. */ return false; } /* Take lock & check for duplicates */ spin_lock_irqsave(&vga_lock, flags); if (vgadev_find(pdev) != NULL) { BUG_ON(1); goto fail; } vgadev->pdev = pdev; /* By default, assume we decode everything */ vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; /* By default, mark it as decoding */ vga_decode_count++; /* * Mark that we "own" resources based on our enables, we will * clear that below if the bridge isn't forwarding. */ pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_IO) vgadev->owns |= VGA_RSRC_LEGACY_IO; if (cmd & PCI_COMMAND_MEMORY) vgadev->owns |= VGA_RSRC_LEGACY_MEM; /* Check if VGA cycles can get down to us */ bus = pdev->bus; while (bus) { bridge = bus->self; if (bridge) { u16 l; pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l); if (!(l & PCI_BRIDGE_CTL_VGA)) { vgadev->owns = 0; break; } } bus = bus->parent; } if (vga_is_boot_device(vgadev)) { vgaarb_info(&pdev->dev, "setting as boot VGA device%s\n", vga_default_device() ? " (overriding previous)" : ""); vga_set_default_device(pdev); } vga_arbiter_check_bridge_sharing(vgadev); /* Add to the list */ list_add_tail(&vgadev->list, &vga_list); vga_count++; vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n", vga_iostate_to_str(vgadev->decodes), vga_iostate_to_str(vgadev->owns), vga_iostate_to_str(vgadev->locks)); spin_unlock_irqrestore(&vga_lock, flags); return true; fail: spin_unlock_irqrestore(&vga_lock, flags); kfree(vgadev); return false; } static bool vga_arbiter_del_pci_device(struct pci_dev *pdev) { struct vga_device *vgadev; unsigned long flags; bool ret = true; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) { ret = false; goto bail; } if (vga_default == pdev) vga_set_default_device(NULL); if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) vga_decode_count--; /* Remove entry from list */ list_del(&vgadev->list); vga_count--; /* Wake up all possible waiters */ wake_up_all(&vga_wait_queue); bail: spin_unlock_irqrestore(&vga_lock, flags); kfree(vgadev); return ret; } /* Called with the lock */ static void vga_update_device_decodes(struct vga_device *vgadev, unsigned int new_decodes) { struct device *dev = &vgadev->pdev->dev; unsigned int old_decodes = vgadev->decodes; unsigned int decodes_removed = ~new_decodes & old_decodes; unsigned int decodes_unlocked = vgadev->locks & decodes_removed; vgadev->decodes = new_decodes; vgaarb_info(dev, "VGA decodes changed: olddecodes=%s,decodes=%s:owns=%s\n", vga_iostate_to_str(old_decodes), vga_iostate_to_str(vgadev->decodes), vga_iostate_to_str(vgadev->owns)); /* If we removed locked decodes, lock count goes to zero, and release */ if (decodes_unlocked) { if (decodes_unlocked & VGA_RSRC_LEGACY_IO) vgadev->io_lock_cnt = 0; if (decodes_unlocked & VGA_RSRC_LEGACY_MEM) vgadev->mem_lock_cnt = 0; __vga_put(vgadev, decodes_unlocked); } /* Change decodes counter */ if (old_decodes & VGA_RSRC_LEGACY_MASK && !(new_decodes & VGA_RSRC_LEGACY_MASK)) vga_decode_count--; if (!(old_decodes 
& VGA_RSRC_LEGACY_MASK) && new_decodes & VGA_RSRC_LEGACY_MASK) vga_decode_count++; vgaarb_dbg(dev, "decoding count now is: %d\n", vga_decode_count); } static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) { struct vga_device *vgadev; unsigned long flags; decodes &= VGA_RSRC_LEGACY_MASK; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) goto bail; /* Don't let userspace futz with kernel driver decodes */ if (userspace && vgadev->set_decode) goto bail; /* Update the device decodes + counter */ vga_update_device_decodes(vgadev, decodes); /* * XXX If somebody is going from "doesn't decode" to "decodes" * state here, additional care must be taken as we may have pending * ownership of non-legacy region. */ bail: spin_unlock_irqrestore(&vga_lock, flags); } /** * vga_set_legacy_decoding * @pdev: PCI device of the VGA card * @decodes: bit mask of what legacy regions the card decodes * * Indicate to the arbiter if the card decodes legacy VGA IOs, legacy VGA * Memory, both, or none. All cards default to both, the card driver (fbdev for * example) should tell the arbiter if it has disabled legacy decoding, so the * card can be left out of the arbitration process (and can be safe to take * interrupts at any time. */ void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes) { __vga_set_legacy_decoding(pdev, decodes, false); } EXPORT_SYMBOL(vga_set_legacy_decoding); /** * vga_client_register - register or unregister a VGA arbitration client * @pdev: PCI device of the VGA client * @set_decode: VGA decode change callback * * Clients have two callback mechanisms they can use. * * @set_decode callback: If a client can disable its GPU VGA resource, it * will get a callback from this to set the encode/decode state. * * Rationale: we cannot disable VGA decode resources unconditionally * because some single GPU laptops seem to require ACPI or BIOS access to * the VGA registers to control things like backlights etc. Hopefully newer * multi-GPU laptops do something saner, and desktops won't have any * special ACPI for this. The driver will get a callback when VGA * arbitration is first used by userspace since some older X servers have * issues. * * Does not check whether a client for @pdev has been registered already. * * To unregister, call vga_client_unregister(). * * Returns: 0 on success, -ENODEV on failure */ int vga_client_register(struct pci_dev *pdev, unsigned int (*set_decode)(struct pci_dev *pdev, bool decode)) { unsigned long flags; struct vga_device *vgadev; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev) vgadev->set_decode = set_decode; spin_unlock_irqrestore(&vga_lock, flags); if (!vgadev) return -ENODEV; return 0; } EXPORT_SYMBOL(vga_client_register); /* * Char driver implementation * * Semantics is: * * open : Open user instance of the arbiter. By default, it's * attached to the default VGA device of the system. * * close : Close user instance, release locks * * read : Return a string indicating the status of the target. * An IO state string is of the form {io,mem,io+mem,none}, * mc and ic are respectively mem and io lock counts (for * debugging/diagnostic only). "decodes" indicate what the * card currently decodes, "owns" indicates what is currently * enabled on it, and "locks" indicates what is locked by this * card. 
If the card is unplugged, we get "invalid" then for * card_ID and an -ENODEV error is returned for any command * until a new card is targeted * * "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)" * * write : write a command to the arbiter. List of commands is: * * target <card_ID> : switch target to card <card_ID> (see below) * lock <io_state> : acquire locks on target ("none" is invalid io_state) * trylock <io_state> : non-blocking acquire locks on target * unlock <io_state> : release locks on target * unlock all : release all locks on target held by this user * decodes <io_state> : set the legacy decoding attributes for the card * * poll : event if something change on any card (not just the target) * * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default" * to go back to the system default card (TODO: not implemented yet). * Currently, only PCI is supported as a prefix, but the userland API may * support other bus types in the future, even if the current kernel * implementation doesn't. * * Note about locks: * * The driver keeps track of which user has what locks on which card. It * supports stacking, like the kernel one. This complicates the implementation * a bit, but makes the arbiter more tolerant to userspace problems and able * to properly cleanup in all cases when a process dies. * Currently, a max of 16 cards simultaneously can have locks issued from * userspace for a given user (file descriptor instance) of the arbiter. * * If the device is hot-unplugged, there is a hook inside the module to notify * it being added/removed in the system and automatically added/removed in * the arbiter. */ #define MAX_USER_CARDS CONFIG_VGA_ARB_MAX_GPUS #define PCI_INVALID_CARD ((struct pci_dev *)-1UL) /* Each user has an array of these, tracking which cards have locks */ struct vga_arb_user_card { struct pci_dev *pdev; unsigned int mem_cnt; unsigned int io_cnt; }; struct vga_arb_private { struct list_head list; struct pci_dev *target; struct vga_arb_user_card cards[MAX_USER_CARDS]; spinlock_t lock; }; static LIST_HEAD(vga_user_list); static DEFINE_SPINLOCK(vga_user_lock); /* * Take a string in the format: "PCI:domain:bus:dev.fn" and return the * respective values. If the string is not in this format, return 0. */ static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain, unsigned int *bus, unsigned int *devfn) { int n; unsigned int slot, func; n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func); if (n != 4) return 0; *devfn = PCI_DEVFN(slot, func); return 1; } static ssize_t vga_arb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct vga_arb_private *priv = file->private_data; struct vga_device *vgadev; struct pci_dev *pdev; unsigned long flags; size_t len; int rc; char *lbuf; lbuf = kmalloc(1024, GFP_KERNEL); if (lbuf == NULL) return -ENOMEM; /* Protect vga_list */ spin_lock_irqsave(&vga_lock, flags); /* If we are targeting the default, use it */ pdev = priv->target; if (pdev == NULL || pdev == PCI_INVALID_CARD) { spin_unlock_irqrestore(&vga_lock, flags); len = sprintf(lbuf, "invalid"); goto done; } /* Find card vgadev structure */ vgadev = vgadev_find(pdev); if (vgadev == NULL) { /* * Wow, it's not in the list, that shouldn't happen, let's * fix us up and return invalid card. 
*/ spin_unlock_irqrestore(&vga_lock, flags); len = sprintf(lbuf, "invalid"); goto done; } /* Fill the buffer with info */ len = snprintf(lbuf, 1024, "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%u:%u)\n", vga_decode_count, pci_name(pdev), vga_iostate_to_str(vgadev->decodes), vga_iostate_to_str(vgadev->owns), vga_iostate_to_str(vgadev->locks), vgadev->io_lock_cnt, vgadev->mem_lock_cnt); spin_unlock_irqrestore(&vga_lock, flags); done: /* Copy that to user */ if (len > count) len = count; rc = copy_to_user(buf, lbuf, len); kfree(lbuf); if (rc) return -EFAULT; return len; } /* * TODO: To avoid parsing inside kernel and to improve the speed we may * consider use ioctl here */ static ssize_t vga_arb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct vga_arb_private *priv = file->private_data; struct vga_arb_user_card *uc = NULL; struct pci_dev *pdev; unsigned int io_state; char kbuf[64], *curr_pos; size_t remaining = count; int ret_val; int i; if (count >= sizeof(kbuf)) return -EINVAL; if (copy_from_user(kbuf, buf, count)) return -EFAULT; curr_pos = kbuf; kbuf[count] = '\0'; if (strncmp(curr_pos, "lock ", 5) == 0) { curr_pos += 5; remaining -= 5; pr_debug("client 0x%p called 'lock'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; goto done; } if (io_state == VGA_RSRC_NONE) { ret_val = -EPROTO; goto done; } pdev = priv->target; if (priv->target == NULL) { ret_val = -ENODEV; goto done; } vga_get_uninterruptible(pdev, io_state); /* Update the client's locks lists */ for (i = 0; i < MAX_USER_CARDS; i++) { if (priv->cards[i].pdev == pdev) { if (io_state & VGA_RSRC_LEGACY_IO) priv->cards[i].io_cnt++; if (io_state & VGA_RSRC_LEGACY_MEM) priv->cards[i].mem_cnt++; break; } } ret_val = count; goto done; } else if (strncmp(curr_pos, "unlock ", 7) == 0) { curr_pos += 7; remaining -= 7; pr_debug("client 0x%p called 'unlock'\n", priv); if (strncmp(curr_pos, "all", 3) == 0) io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; else { if (!vga_str_to_iostate (curr_pos, remaining, &io_state)) { ret_val = -EPROTO; goto done; } /* TODO: Add this? if (io_state == VGA_RSRC_NONE) { ret_val = -EPROTO; goto done; } */ } pdev = priv->target; if (priv->target == NULL) { ret_val = -ENODEV; goto done; } for (i = 0; i < MAX_USER_CARDS; i++) { if (priv->cards[i].pdev == pdev) uc = &priv->cards[i]; } if (!uc) { ret_val = -EINVAL; goto done; } if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { ret_val = -EINVAL; goto done; } if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { ret_val = -EINVAL; goto done; } vga_put(pdev, io_state); if (io_state & VGA_RSRC_LEGACY_IO) uc->io_cnt--; if (io_state & VGA_RSRC_LEGACY_MEM) uc->mem_cnt--; ret_val = count; goto done; } else if (strncmp(curr_pos, "trylock ", 8) == 0) { curr_pos += 8; remaining -= 8; pr_debug("client 0x%p called 'trylock'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; goto done; } /* TODO: Add this? if (io_state == VGA_RSRC_NONE) { ret_val = -EPROTO; goto done; } */ pdev = priv->target; if (priv->target == NULL) { ret_val = -ENODEV; goto done; } if (vga_tryget(pdev, io_state)) { /* Update the client's locks lists... 
*/ for (i = 0; i < MAX_USER_CARDS; i++) { if (priv->cards[i].pdev == pdev) { if (io_state & VGA_RSRC_LEGACY_IO) priv->cards[i].io_cnt++; if (io_state & VGA_RSRC_LEGACY_MEM) priv->cards[i].mem_cnt++; break; } } ret_val = count; goto done; } else { ret_val = -EBUSY; goto done; } } else if (strncmp(curr_pos, "target ", 7) == 0) { unsigned int domain, bus, devfn; struct vga_device *vgadev; curr_pos += 7; remaining -= 7; pr_debug("client 0x%p called 'target'\n", priv); /* If target is default */ if (!strncmp(curr_pos, "default", 7)) pdev = pci_dev_get(vga_default_device()); else { if (!vga_pci_str_to_vars(curr_pos, remaining, &domain, &bus, &devfn)) { ret_val = -EPROTO; goto done; } pdev = pci_get_domain_bus_and_slot(domain, bus, devfn); if (!pdev) { pr_debug("invalid PCI address %04x:%02x:%02x.%x\n", domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); ret_val = -ENODEV; goto done; } pr_debug("%s ==> %04x:%02x:%02x.%x pdev %p\n", curr_pos, domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pdev); } vgadev = vgadev_find(pdev); pr_debug("vgadev %p\n", vgadev); if (vgadev == NULL) { if (pdev) { vgaarb_dbg(&pdev->dev, "not a VGA device\n"); pci_dev_put(pdev); } ret_val = -ENODEV; goto done; } priv->target = pdev; for (i = 0; i < MAX_USER_CARDS; i++) { if (priv->cards[i].pdev == pdev) break; if (priv->cards[i].pdev == NULL) { priv->cards[i].pdev = pdev; priv->cards[i].io_cnt = 0; priv->cards[i].mem_cnt = 0; break; } } if (i == MAX_USER_CARDS) { vgaarb_dbg(&pdev->dev, "maximum user cards (%d) number reached, ignoring this one!\n", MAX_USER_CARDS); pci_dev_put(pdev); /* XXX: Which value to return? */ ret_val = -ENOMEM; goto done; } ret_val = count; pci_dev_put(pdev); goto done; } else if (strncmp(curr_pos, "decodes ", 8) == 0) { curr_pos += 8; remaining -= 8; pr_debug("client 0x%p called 'decodes'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; goto done; } pdev = priv->target; if (priv->target == NULL) { ret_val = -ENODEV; goto done; } __vga_set_legacy_decoding(pdev, io_state, true); ret_val = count; goto done; } /* If we got here, the message written is not part of the protocol! */ return -EPROTO; done: return ret_val; } static __poll_t vga_arb_fpoll(struct file *file, poll_table *wait) { pr_debug("%s\n", __func__); poll_wait(file, &vga_wait_queue, wait); return EPOLLIN; } static int vga_arb_open(struct inode *inode, struct file *file) { struct vga_arb_private *priv; unsigned long flags; pr_debug("%s\n", __func__); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) return -ENOMEM; spin_lock_init(&priv->lock); file->private_data = priv; spin_lock_irqsave(&vga_user_lock, flags); list_add(&priv->list, &vga_user_list); spin_unlock_irqrestore(&vga_user_lock, flags); /* Set the client's lists of locks */ priv->target = vga_default_device(); /* Maybe this is still null! 
*/ priv->cards[0].pdev = priv->target; priv->cards[0].io_cnt = 0; priv->cards[0].mem_cnt = 0; return 0; } static int vga_arb_release(struct inode *inode, struct file *file) { struct vga_arb_private *priv = file->private_data; struct vga_arb_user_card *uc; unsigned long flags; int i; pr_debug("%s\n", __func__); spin_lock_irqsave(&vga_user_lock, flags); list_del(&priv->list); for (i = 0; i < MAX_USER_CARDS; i++) { uc = &priv->cards[i]; if (uc->pdev == NULL) continue; vgaarb_dbg(&uc->pdev->dev, "uc->io_cnt == %d, uc->mem_cnt == %d\n", uc->io_cnt, uc->mem_cnt); while (uc->io_cnt--) vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); while (uc->mem_cnt--) vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM); } spin_unlock_irqrestore(&vga_user_lock, flags); kfree(priv); return 0; } /* * Callback any registered clients to let them know we have a change in VGA * cards. */ static void vga_arbiter_notify_clients(void) { struct vga_device *vgadev; unsigned long flags; unsigned int new_decodes; bool new_state; if (!vga_arbiter_used) return; new_state = (vga_count > 1) ? false : true; spin_lock_irqsave(&vga_lock, flags); list_for_each_entry(vgadev, &vga_list, list) { if (vgadev->set_decode) { new_decodes = vgadev->set_decode(vgadev->pdev, new_state); vga_update_device_decodes(vgadev, new_decodes); } } spin_unlock_irqrestore(&vga_lock, flags); } static int pci_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct pci_dev *pdev = to_pci_dev(dev); bool notify = false; vgaarb_dbg(dev, "%s\n", __func__); /* Only deal with VGA class devices */ if (!pci_is_vga(pdev)) return 0; /* * For now, we're only interested in devices added and removed. * I didn't test this thing here, so someone needs to double check * for the cases of hot-pluggable VGA cards. */ if (action == BUS_NOTIFY_ADD_DEVICE) notify = vga_arbiter_add_pci_device(pdev); else if (action == BUS_NOTIFY_DEL_DEVICE) notify = vga_arbiter_del_pci_device(pdev); if (notify) vga_arbiter_notify_clients(); return 0; } static struct notifier_block pci_notifier = { .notifier_call = pci_notify, }; static const struct file_operations vga_arb_device_fops = { .read = vga_arb_read, .write = vga_arb_write, .poll = vga_arb_fpoll, .open = vga_arb_open, .release = vga_arb_release, .llseek = noop_llseek, }; static struct miscdevice vga_arb_device = { MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops }; static int __init vga_arb_device_init(void) { int rc; struct pci_dev *pdev; rc = misc_register(&vga_arb_device); if (rc < 0) pr_err("error %d registering device\n", rc); bus_register_notifier(&pci_bus_type, &pci_notifier); /* Add all VGA class PCI devices by default */ pdev = NULL; while ((pdev = pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) { if (pci_is_vga(pdev)) vga_arbiter_add_pci_device(pdev); } pr_info("loaded\n"); return rc; } subsys_initcall_sync(vga_arb_device_init);
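The char-driver comment above documents a small text protocol (target/lock/trylock/unlock/decodes) spoken over the arbiter's misc device. The following is a minimal user-space sketch of that protocol, assuming the miscdevice appears as /dev/vga_arbiter and using a purely hypothetical PCI address; it is illustrative only, not part of the driver.

/*
 * User-space sketch of the vga_arbiter write/read protocol.
 * Assumptions: device node is /dev/vga_arbiter, and PCI:0000:01:00.0
 * is a placeholder card address.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd = open("/dev/vga_arbiter", O_RDWR);

	if (fd < 0)
		return 1;

	/* One command per write(): pick a target, then lock legacy IO+MEM. */
	write(fd, "target PCI:0000:01:00.0", strlen("target PCI:0000:01:00.0"));
	write(fd, "lock io+mem", strlen("lock io+mem"));

	/* Read back "<card_ID>,decodes=...,owns=...,locks=... (ic,mc)". */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s\n", buf);
	}

	/* Drop the locks; closing the fd would also release them. */
	write(fd, "unlock io+mem", strlen("unlock io+mem"));
	close(fd);
	return 0;
}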
17 8312 1 2 41 213 7717 17 7277 3286 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PERCPU_COUNTER_H #define _LINUX_PERCPU_COUNTER_H /* * A simple "approximate counter" for use in ext2 and ext3 superblocks. * * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. */ #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/list.h> #include <linux/threads.h> #include <linux/percpu.h> #include <linux/types.h> /* percpu_counter batch for local add or sub */ #define PERCPU_COUNTER_LOCAL_BATCH INT_MAX #ifdef CONFIG_SMP struct percpu_counter { raw_spinlock_t lock; s64 count; #ifdef CONFIG_HOTPLUG_CPU struct list_head list; /* All percpu_counters are on a list */ #endif s32 __percpu *counters; }; extern int percpu_counter_batch; int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount, gfp_t gfp, u32 nr_counters, struct lock_class_key *key); #define percpu_counter_init_many(fbc, value, gfp, nr_counters) \ ({ \ static struct lock_class_key __key; \ \ __percpu_counter_init_many(fbc, value, gfp, nr_counters,\ &__key); \ }) #define percpu_counter_init(fbc, value, gfp) \ percpu_counter_init_many(fbc, value, gfp, 1) void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters); static inline void percpu_counter_destroy(struct percpu_counter *fbc) { percpu_counter_destroy_many(fbc, 1); } void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount, s32 batch); void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); } static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { percpu_counter_add_batch(fbc, amount, percpu_counter_batch); } static inline bool percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) { return __percpu_counter_limited_add(fbc, limit, amount, percpu_counter_batch); } /* * With percpu_counter_add_local() and percpu_counter_sub_local(), counts * are accumulated in local per cpu counter and not in fbc->count until * local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter * write efficient. 
* But percpu_counter_sum(), instead of percpu_counter_read(), needs to be * used to add up the counts from each CPU to account for all the local * counts. So percpu_counter_add_local() and percpu_counter_sub_local() * should be used when a counter is updated frequently and read rarely. */ static inline void percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) { percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH); } static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { s64 ret = __percpu_counter_sum(fbc); return ret < 0 ? 0 : ret; } static inline s64 percpu_counter_sum(struct percpu_counter *fbc) { return __percpu_counter_sum(fbc); } static inline s64 percpu_counter_read(struct percpu_counter *fbc) { return fbc->count; } /* * It is possible for the percpu_counter_read() to return a small negative * number for some counter which should never be negative. * */ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { /* Prevent reloads of fbc->count */ s64 ret = READ_ONCE(fbc->count); if (ret >= 0) return ret; return 0; } static inline bool percpu_counter_initialized(struct percpu_counter *fbc) { return (fbc->counters != NULL); } #else /* !CONFIG_SMP */ struct percpu_counter { s64 count; }; static inline int percpu_counter_init_many(struct percpu_counter *fbc, s64 amount, gfp_t gfp, u32 nr_counters) { u32 i; for (i = 0; i < nr_counters; i++) fbc[i].count = amount; return 0; } static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp) { return percpu_counter_init_many(fbc, amount, gfp, 1); } static inline void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters) { } static inline void percpu_counter_destroy(struct percpu_counter *fbc) { } static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount) { fbc->count = amount; } static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { if (fbc->count > rhs) return 1; else if (fbc->count < rhs) return -1; else return 0; } static inline int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) { return percpu_counter_compare(fbc, rhs); } static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { unsigned long flags; local_irq_save(flags); fbc->count += amount; local_irq_restore(flags); } static inline bool percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) { unsigned long flags; bool good = false; s64 count; if (amount == 0) return true; local_irq_save(flags); count = fbc->count + amount; if ((amount > 0 && count <= limit) || (amount < 0 && count >= limit)) { fbc->count = count; good = true; } local_irq_restore(flags); return good; } /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */ static inline void percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) { percpu_counter_add(fbc, amount); } static inline void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) { percpu_counter_add(fbc, amount); } static inline s64 percpu_counter_read(struct percpu_counter *fbc) { return fbc->count; } /* * percpu_counter is intended to track positive numbers. In the UP case the * number should never be negative. 
*/ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) { return fbc->count; } static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { return percpu_counter_read_positive(fbc); } static inline s64 percpu_counter_sum(struct percpu_counter *fbc) { return percpu_counter_read(fbc); } static inline bool percpu_counter_initialized(struct percpu_counter *fbc) { return true; } static inline void percpu_counter_sync(struct percpu_counter *fbc) { } #endif /* CONFIG_SMP */ static inline void percpu_counter_inc(struct percpu_counter *fbc) { percpu_counter_add(fbc, 1); } static inline void percpu_counter_dec(struct percpu_counter *fbc) { percpu_counter_add(fbc, -1); } static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) { percpu_counter_add(fbc, -amount); } static inline void percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount) { percpu_counter_add_local(fbc, -amount); } #endif /* _LINUX_PERCPU_COUNTER_H */
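As a quick illustration of the API declared in this header, here is a kernel-style usage sketch. The widget_* names are hypothetical; only the percpu_counter_*() calls come from the header, and the accuracy remark reflects the batch behaviour described above.

/*
 * Illustrative sketch (not part of the header): typical percpu_counter use.
 * All widget_* identifiers are made up for the example.
 */
#include <linux/init.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;

static int widget_counting_init(void)
{
	/* Allocate the per-CPU counters, starting the count at 0. */
	return percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
}

static void widget_alloc_one(void)
{
	/* Cheap per-CPU update; folded into fbc->count in batches. */
	percpu_counter_inc(&nr_widgets);
}

static void widget_free_one(void)
{
	percpu_counter_dec(&nr_widgets);
}

static s64 widget_count_exact(void)
{
	/* Accurate but expensive: sums every CPU's local delta. */
	return percpu_counter_sum_positive(&nr_widgets);
}

static s64 widget_count_approx(void)
{
	/*
	 * O(1) read of the shared count; may deviate by roughly
	 * num_online_cpus() * percpu_counter_batch.
	 */
	return percpu_counter_read_positive(&nr_widgets);
}

static void widget_counting_exit(void)
{
	percpu_counter_destroy(&nr_widgets);
}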
3 129 3 213 193 213 53 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 /* SPDX-License-Identifier: GPL-2.0 */ /* * INETPEER - A storage for permanent information about peers * * Authors: Andrey V. Savochkin <saw@msu.ru> */ #ifndef _NET_INETPEER_H #define _NET_INETPEER_H #include <linux/types.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/spinlock.h> #include <linux/rtnetlink.h> #include <net/ipv6.h> #include <linux/atomic.h> /* IPv4 address key for cache lookups */ struct ipv4_addr_key { __be32 addr; int vif; }; #define INETPEER_MAXKEYSZ (sizeof(struct in6_addr) / sizeof(u32)) struct inetpeer_addr { union { struct ipv4_addr_key a4; struct in6_addr a6; u32 key[INETPEER_MAXKEYSZ]; }; __u16 family; }; struct inet_peer { struct rb_node rb_node; struct inetpeer_addr daddr; u32 metrics[RTAX_MAX]; u32 rate_tokens; /* rate limiting for ICMP */ u32 n_redirects; unsigned long rate_last; /* * Once inet_peer is queued for deletion (refcnt == 0), following field * is not available: rid * We can share memory with rcu_head to help keep inet_peer small. */ union { struct { atomic_t rid; /* Frag reception counter */ }; struct rcu_head rcu; }; /* following fields might be frequently dirtied */ __u32 dtime; /* the time of last use of not referenced entries */ refcount_t refcnt; }; struct inet_peer_base { struct rb_root rb_root; seqlock_t lock; int total; }; void inet_peer_base_init(struct inet_peer_base *); void inet_initpeers(void) __init; #define INETPEER_METRICS_NEW (~(u32) 0) static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) { iaddr->a4.addr = ip; iaddr->a4.vif = 0; iaddr->family = AF_INET; } static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr) { return iaddr->a4.addr; } static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr, struct in6_addr *in6) { iaddr->a6 = *in6; iaddr->family = AF_INET6; } static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr) { return &iaddr->a6; } /* can be called with or without local BH being disabled */ struct inet_peer *inet_getpeer(struct inet_peer_base *base, const struct inetpeer_addr *daddr); static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base, __be32 v4daddr, int vif) { struct inetpeer_addr daddr; daddr.a4.addr = v4daddr; daddr.a4.vif = vif; daddr.family = AF_INET; return inet_getpeer(base, &daddr); } static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base, const struct in6_addr *v6daddr) { struct inetpeer_addr daddr; daddr.a6 = *v6daddr; daddr.family = AF_INET6; return inet_getpeer(base, &daddr); } static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a, const struct inetpeer_addr *b) { int i, n; if (a->family == AF_INET) n = sizeof(a->a4) / sizeof(u32); else n = sizeof(a->a6) / sizeof(u32); for (i = 0; i < n; i++) { if (a->key[i] == b->key[i]) continue; if (a->key[i] < b->key[i]) return -1; return 1; } return 0; } /* can be called from BH context or outside */ void inet_putpeer(struct inet_peer *p); bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); void 
inetpeer_invalidate_tree(struct inet_peer_base *); #endif /* _NET_INETPEER_H */
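To show how the lookup and rate-limit helpers declared here fit together, below is a small kernel-style sketch in the spirit of the ICMP code. The function name, base pointer, vif value and HZ timeout are illustrative assumptions; only the inet_getpeer_v4(), inet_peer_xrlim_allow() and inet_putpeer() calls come from this header.

/*
 * Illustrative sketch (not part of the header): per-peer rate limiting.
 * example_allow_event() and its parameters are hypothetical.
 */
#include <net/inetpeer.h>

static bool example_allow_event(struct inet_peer_base *base, __be32 daddr)
{
	struct inet_peer *peer;
	bool allow = true;

	/* Look up (creating if needed) the long-lived state for this host. */
	peer = inet_getpeer_v4(base, daddr, 0);
	if (peer) {
		/* Token-bucket style check: allow at most one event per HZ. */
		allow = inet_peer_xrlim_allow(peer, HZ);
		inet_putpeer(peer);
	}
	return allow;
}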
2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 // SPDX-License-Identifier: GPL-2.0-or-later /* GTP according to GSM TS 09.60 / 3GPP TS 29.060 * * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org> * * Author: Harald Welte <hwelte@sysmocom.de> * Pablo Neira Ayuso <pablo@netfilter.org> * Andreas Schultz <aschultz@travelping.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/udp.h> #include <linux/rculist.h> #include <linux/jhash.h> #include <linux/if_tunnel.h> #include <linux/net.h> #include <linux/file.h> #include <linux/gtp.h> #include <net/net_namespace.h> #include <net/protocol.h> #include <net/inet_dscp.h> #include <net/inet_sock.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/udp.h> #include <net/udp_tunnel.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/genetlink.h> #include <net/netns/generic.h> #include <net/gtp.h> /* An active session for the subscriber. */ struct pdp_ctx { struct hlist_node hlist_tid; struct hlist_node hlist_addr; union { struct { u64 tid; u16 flow; } v0; struct { u32 i_tei; u32 o_tei; } v1; } u; u8 gtp_version; u16 af; union { struct in_addr addr; struct in6_addr addr6; } ms; union { struct in_addr addr; struct in6_addr addr6; } peer; struct sock *sk; struct net_device *dev; atomic_t tx_seq; struct rcu_head rcu_head; }; /* One instance of the GTP device. 
*/ struct gtp_dev { struct list_head list; struct sock *sk0; struct sock *sk1u; u8 sk_created; struct net_device *dev; struct net *net; unsigned int role; unsigned int hash_size; struct hlist_head *tid_hash; struct hlist_head *addr_hash; u8 restart_count; }; struct echo_info { u16 af; u8 gtp_version; union { struct in_addr addr; } ms; union { struct in_addr addr; } peer; }; static unsigned int gtp_net_id __read_mostly; struct gtp_net { struct list_head gtp_dev_list; }; static u32 gtp_h_initval; static struct genl_family gtp_genl_family; enum gtp_multicast_groups { GTP_GENL_MCGRP, }; static const struct genl_multicast_group gtp_genl_mcgrps[] = { [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME }, }; static void pdp_context_delete(struct pdp_ctx *pctx); static inline u32 gtp0_hashfn(u64 tid) { u32 *tid32 = (u32 *) &tid; return jhash_2words(tid32[0], tid32[1], gtp_h_initval); } static inline u32 gtp1u_hashfn(u32 tid) { return jhash_1word(tid, gtp_h_initval); } static inline u32 ipv4_hashfn(__be32 ip) { return jhash_1word((__force u32)ip, gtp_h_initval); } static u32 ipv6_hashfn(const struct in6_addr *ip6) { return jhash_2words((__force u32)ip6->s6_addr32[0], (__force u32)ip6->s6_addr32[1], gtp_h_initval); } /* Resolve a PDP context structure based on the 64bit TID. */ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family) { struct hlist_head *head; struct pdp_ctx *pdp; head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_tid) { if (pdp->af == family && pdp->gtp_version == GTP_V0 && pdp->u.v0.tid == tid) return pdp; } return NULL; } /* Resolve a PDP context structure based on the 32bit TEI. */ static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family) { struct hlist_head *head; struct pdp_ctx *pdp; head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_tid) { if (pdp->af == family && pdp->gtp_version == GTP_V1 && pdp->u.v1.i_tei == tid) return pdp; } return NULL; } /* Resolve a PDP context based on IPv4 address of MS. */ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr) { struct hlist_head *head; struct pdp_ctx *pdp; head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_addr) { if (pdp->af == AF_INET && pdp->ms.addr.s_addr == ms_addr) return pdp; } return NULL; } /* 3GPP TS 29.060: PDN Connection: the association between a MS represented by * [...] one IPv6 *prefix* and a PDN represented by an APN. * * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be * according to the maximum prefix length for a global IPv6 address as * specified in the IPv6 Addressing Architecture, see RFC 4291. * * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other * than those that start with binary 000 have a 64-bit interface ID field * (i.e., n + m = 64). 
*/ static bool ipv6_pdp_addr_equal(const struct in6_addr *a, const struct in6_addr *b) { return a->s6_addr32[0] == b->s6_addr32[0] && a->s6_addr32[1] == b->s6_addr32[1]; } static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp, const struct in6_addr *ms_addr) { struct hlist_head *head; struct pdp_ctx *pdp; head = &gtp->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_addr) { if (pdp->af == AF_INET6 && ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr)) return pdp; } return NULL; } static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, unsigned int hdrlen, unsigned int role) { struct iphdr *iph; if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr))) return false; iph = (struct iphdr *)(skb->data + hdrlen); if (role == GTP_ROLE_SGSN) return iph->daddr == pctx->ms.addr.s_addr; else return iph->saddr == pctx->ms.addr.s_addr; } static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx, unsigned int hdrlen, unsigned int role) { struct ipv6hdr *ip6h; int ret; if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr))) return false; ip6h = (struct ipv6hdr *)(skb->data + hdrlen); if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) || (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL)) return false; if (role == GTP_ROLE_SGSN) { ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6); } else { ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6); } return ret; } /* Check if the inner IP address in this packet is assigned to any * existing mobile subscriber. */ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx, unsigned int hdrlen, unsigned int role, __u16 inner_proto) { switch (inner_proto) { case ETH_P_IP: return gtp_check_ms_ipv4(skb, pctx, hdrlen, role); case ETH_P_IPV6: return gtp_check_ms_ipv6(skb, pctx, hdrlen, role); } return false; } static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen, __u16 *inner_proto) { __u8 *ip_version, _ip_version; ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version), &_ip_version); if (!ip_version) return -1; switch (*ip_version & 0xf0) { case 0x40: *inner_proto = ETH_P_IP; break; case 0x60: *inner_proto = ETH_P_IPV6; break; default: return -1; } return 0; } static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, unsigned int hdrlen, unsigned int role, __u16 inner_proto) { if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) { netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); return 1; } /* Get rid of the GTP + UDP headers. */ if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto), !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) { pctx->dev->stats.rx_length_errors++; goto err; } netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n"); /* Now that the UDP and the GTP header have been removed, set up the * new network header. This is required by the upper layer to * calculate the transport header. 
*/ skb_reset_network_header(skb); skb_reset_mac_header(skb); skb->dev = pctx->dev; dev_sw_netstats_rx_add(pctx->dev, skb->len); __netif_rx(skb); return 0; err: pctx->dev->stats.rx_dropped++; return -1; } static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, const struct sock *sk, __be32 daddr, __be32 saddr) { memset(fl4, 0, sizeof(*fl4)); fl4->flowi4_oif = sk->sk_bound_dev_if; fl4->daddr = daddr; fl4->saddr = saddr; fl4->flowi4_tos = inet_dscp_to_dsfield(inet_sk_dscp(inet_sk(sk))); fl4->flowi4_scope = ip_sock_rt_scope(sk); fl4->flowi4_proto = sk->sk_protocol; return ip_route_output_key(sock_net(sk), fl4); } static struct rt6_info *ip6_route_output_gtp(struct net *net, struct flowi6 *fl6, const struct sock *sk, const struct in6_addr *daddr, struct in6_addr *saddr) { struct dst_entry *dst; memset(fl6, 0, sizeof(*fl6)); fl6->flowi6_oif = sk->sk_bound_dev_if; fl6->daddr = *daddr; fl6->saddr = *saddr; fl6->flowi6_proto = sk->sk_protocol; dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL); if (IS_ERR(dst)) return ERR_PTR(-ENETUNREACH); return (struct rt6_info *)dst; } /* GSM TS 09.60. 7.3 * In all Path Management messages: * - TID: is not used and shall be set to 0. * - Flow Label is not used and shall be set to 0 * In signalling messages: * - number: this field is not yet used in signalling messages. * It shall be set to 255 by the sender and shall be ignored * by the receiver * Returns true if the echo req was correct, false otherwise. */ static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0) { return !(gtp0->tid || (gtp0->flags ^ 0x1e) || gtp0->number != 0xff || gtp0->flow); } /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */ static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type) { int len_pkt, len_hdr; hdr->flags = 0x1e; /* v0, GTP-non-prime. */ hdr->type = msg_type; /* GSM TS 09.60. 7.3 In all Path Management Flow Label and TID * are not used and shall be set to 0. */ hdr->flow = 0; hdr->tid = 0; hdr->number = 0xff; hdr->spare[0] = 0xff; hdr->spare[1] = 0xff; hdr->spare[2] = 0xff; len_pkt = sizeof(struct gtp0_packet); len_hdr = sizeof(struct gtp0_header); if (msg_type == GTP_ECHO_RSP) hdr->length = htons(len_pkt - len_hdr); else hdr->length = 0; } static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); struct flowi4 fl4; struct rtable *rt; /* find route to the sender, * src address becomes dst address and vice versa. */ rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr); if (IS_ERR(rt)) { netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", &iph->saddr); return -1; } udp_tunnel_xmit_skb(rt, gtp->sk0, skb, fl4.saddr, fl4.daddr, iph->tos, ip4_dst_hoplimit(&rt->dst), 0, htons(GTP0_PORT), htons(GTP0_PORT), !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)), false); return 0; } static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp0_packet *gtp_pkt; struct gtp0_header *gtp0; __be16 seq; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); if (!gtp0_validate_echo_hdr(gtp0)) return -1; seq = gtp0->seq; /* pull GTP and UDP headers */ skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr)); gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet)); memset(gtp_pkt, 0, sizeof(struct gtp0_packet)); gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP); /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response * message shall be copied from the signalling request message * that the GSN is replying to. 
*/ gtp_pkt->gtp0_h.seq = seq; gtp_pkt->ie.tag = GTPIE_RECOVERY; gtp_pkt->ie.val = gtp->restart_count; switch (gtp->sk0->sk_family) { case AF_INET: if (gtp0_send_echo_resp_ip(gtp, skb) < 0) return -1; break; case AF_INET6: return -1; } return 0; } static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, int flags, u32 type, struct echo_info echo) { void *genlh; genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags, type); if (!genlh) goto failure; if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) || nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr)) goto failure; genlmsg_end(skb, genlh); return 0; failure: genlmsg_cancel(skb, genlh); return -EMSGSIZE; } static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo) { struct iphdr *iph = ip_hdr(skb); echo->ms.addr.s_addr = iph->daddr; echo->peer.addr.s_addr = iph->saddr; echo->gtp_version = GTP_V0; } static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp0_header *gtp0; struct echo_info echo; struct sk_buff *msg; int ret; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); if (!gtp0_validate_echo_hdr(gtp0)) return -1; switch (gtp->sk0->sk_family) { case AF_INET: gtp0_handle_echo_resp_ip(skb, &echo); break; case AF_INET6: return -1; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo); if (ret < 0) { nlmsg_free(msg); return ret; } return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev), msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); } static int gtp_proto_to_family(__u16 proto) { switch (proto) { case ETH_P_IP: return AF_INET; case ETH_P_IPV6: return AF_INET6; default: WARN_ON_ONCE(1); break; } return AF_UNSPEC; } /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp0_header); struct gtp0_header *gtp0; struct pdp_ctx *pctx; __u16 inner_proto; if (!pskb_may_pull(skb, hdrlen)) return -1; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); if ((gtp0->flags >> 5) != GTP_V0) return 1; /* If the sockets were created in kernel, it means that * there is no daemon running in userspace which would * handle echo request. */ if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created) return gtp0_send_echo_resp(gtp, skb); if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created) return gtp0_handle_echo_resp(gtp, skb); if (gtp0->type != GTP_TPDU) return 1; if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) { netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n"); return -1; } pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid), gtp_proto_to_family(inner_proto)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); return 1; } return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto); } /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */ static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type) { int len_pkt, len_hdr; /* S flag must be set to 1 */ hdr->flags = 0x32; /* v1, GTP-non-prime. */ hdr->type = msg_type; /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */ hdr->tid = 0; /* seq, npdu and next should be counted to the length of the GTP packet * that's why szie of gtp1_header should be subtracted, * not size of gtp1_header_long. 
*/ len_hdr = sizeof(struct gtp1_header); if (msg_type == GTP_ECHO_RSP) { len_pkt = sizeof(struct gtp1u_packet); hdr->length = htons(len_pkt - len_hdr); } else { /* GTP_ECHO_REQ does not carry GTP Information Element, * the why gtp1_header_long is used here. */ len_pkt = sizeof(struct gtp1_header_long); hdr->length = htons(len_pkt - len_hdr); } } static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp1_header_long *gtp1u; struct gtp1u_packet *gtp_pkt; struct rtable *rt; struct flowi4 fl4; struct iphdr *iph; gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr)); /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response, * Error Indication and Supported Extension Headers Notification * messages, the S flag shall be set to 1 and TEID shall be set to 0. */ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid) return -1; /* pull GTP and UDP headers */ skb_pull_data(skb, sizeof(struct gtp1_header_long) + sizeof(struct udphdr)); gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet)); memset(gtp_pkt, 0, sizeof(struct gtp1u_packet)); gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP); /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the * Recovery information element shall not be used, i.e. it shall * be set to zero by the sender and shall be ignored by the receiver. * The Recovery information element is mandatory due to backwards * compatibility reasons. */ gtp_pkt->ie.tag = GTPIE_RECOVERY; gtp_pkt->ie.val = 0; iph = ip_hdr(skb); /* find route to the sender, * src address becomes dst address and vice versa. */ rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr); if (IS_ERR(rt)) { netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", &iph->saddr); return -1; } udp_tunnel_xmit_skb(rt, gtp->sk1u, skb, fl4.saddr, fl4.daddr, iph->tos, ip4_dst_hoplimit(&rt->dst), 0, htons(GTP1U_PORT), htons(GTP1U_PORT), !net_eq(sock_net(gtp->sk1u), dev_net(gtp->dev)), false); return 0; } static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp1_header_long *gtp1u; struct echo_info echo; struct sk_buff *msg; struct iphdr *iph; int ret; gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr)); /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response, * Error Indication and Supported Extension Headers Notification * messages, the S flag shall be set to 1 and TEID shall be set to 0. */ if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid) return -1; iph = ip_hdr(skb); echo.ms.addr.s_addr = iph->daddr; echo.peer.addr.s_addr = iph->saddr; echo.gtp_version = GTP_V1; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo); if (ret < 0) { nlmsg_free(msg); return ret; } return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev), msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); } static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen) { struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr; unsigned int offset = *hdrlen; __u8 *next_type, _next_type; /* From 29.060: "The Extension Header Length field specifies the length * of the particular Extension header in 4 octets units." * * This length field includes length field size itself (1 byte), * payload (variable length) and next type (1 byte). The extension * header is aligned to to 4 bytes. 
*/ do { gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr), &_gtp_exthdr); if (!gtp_exthdr || !gtp_exthdr->len) return -1; offset += gtp_exthdr->len * 4; /* From 29.060: "If no such Header follows, then the value of * the Next Extension Header Type shall be 0." */ next_type = skb_header_pointer(skb, offset - 1, sizeof(_next_type), &_next_type); if (!next_type) return -1; } while (*next_type != 0); *hdrlen = offset; return 0; } static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp1_header); struct gtp1_header *gtp1; struct pdp_ctx *pctx; __u16 inner_proto; if (!pskb_may_pull(skb, hdrlen)) return -1; gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); if ((gtp1->flags >> 5) != GTP_V1) return 1; /* If the sockets were created in kernel, it means that * there is no daemon running in userspace which would * handle echo request. */ if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created) return gtp1u_send_echo_resp(gtp, skb); if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created) return gtp1u_handle_echo_resp(gtp, skb); if (gtp1->type != GTP_TPDU) return 1; /* From 29.060: "This field shall be present if and only if any one or * more of the S, PN and E flags are set.". * * If any of the bit is set, then the remaining ones also have to be * set. */ if (gtp1->flags & GTP1_F_MASK) hdrlen += 4; /* Make sure the header is larger enough, including extensions. */ if (!pskb_may_pull(skb, hdrlen)) return -1; if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) { netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n"); return -1; } gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid), gtp_proto_to_family(inner_proto)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); return 1; } if (gtp1->flags & GTP1_F_EXTHDR && gtp_parse_exthdrs(skb, &hdrlen) < 0) return -1; return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto); } static void __gtp_encap_destroy(struct sock *sk) { struct gtp_dev *gtp; lock_sock(sk); gtp = sk->sk_user_data; if (gtp) { if (gtp->sk0 == sk) gtp->sk0 = NULL; else gtp->sk1u = NULL; WRITE_ONCE(udp_sk(sk)->encap_type, 0); rcu_assign_sk_user_data(sk, NULL); release_sock(sk); sock_put(sk); return; } release_sock(sk); } static void gtp_encap_destroy(struct sock *sk) { rtnl_lock(); __gtp_encap_destroy(sk); rtnl_unlock(); } static void gtp_encap_disable_sock(struct sock *sk) { if (!sk) return; __gtp_encap_destroy(sk); } static void gtp_encap_disable(struct gtp_dev *gtp) { if (gtp->sk_created) { udp_tunnel_sock_release(gtp->sk0->sk_socket); udp_tunnel_sock_release(gtp->sk1u->sk_socket); gtp->sk_created = false; gtp->sk0 = NULL; gtp->sk1u = NULL; } else { gtp_encap_disable_sock(gtp->sk0); gtp_encap_disable_sock(gtp->sk1u); } } /* UDP encapsulation receive handler. See net/ipv4/udp.c. * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket. */ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct gtp_dev *gtp; int ret = 0; gtp = rcu_dereference_sk_user_data(sk); if (!gtp) return 1; netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); switch (READ_ONCE(udp_sk(sk)->encap_type)) { case UDP_ENCAP_GTP0: netdev_dbg(gtp->dev, "received GTP0 packet\n"); ret = gtp0_udp_encap_recv(gtp, skb); break; case UDP_ENCAP_GTP1U: netdev_dbg(gtp->dev, "received GTP1U packet\n"); ret = gtp1u_udp_encap_recv(gtp, skb); break; default: ret = -1; /* Shouldn't happen. 
*/ } switch (ret) { case 1: netdev_dbg(gtp->dev, "pass up to the process\n"); break; case 0: break; case -1: netdev_dbg(gtp->dev, "GTP packet has been dropped\n"); kfree_skb(skb); ret = 0; break; } return ret; } static void gtp_dev_uninit(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); gtp_encap_disable(gtp); } static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) { int payload_len = skb->len; struct gtp0_header *gtp0; gtp0 = skb_push(skb, sizeof(*gtp0)); gtp0->flags = 0x1e; /* v0, GTP-non-prime. */ gtp0->type = GTP_TPDU; gtp0->length = htons(payload_len); gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff); gtp0->flow = htons(pctx->u.v0.flow); gtp0->number = 0xff; gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff; gtp0->tid = cpu_to_be64(pctx->u.v0.tid); } static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) { int payload_len = skb->len; struct gtp1_header *gtp1; gtp1 = skb_push(skb, sizeof(*gtp1)); /* Bits 8 7 6 5 4 3 2 1 * +--+--+--+--+--+--+--+--+ * |version |PT| 0| E| S|PN| * +--+--+--+--+--+--+--+--+ * 0 0 1 1 1 0 0 0 */ gtp1->flags = 0x30; /* v1, GTP-non-prime. */ gtp1->type = GTP_TPDU; gtp1->length = htons(payload_len); gtp1->tid = htonl(pctx->u.v1.o_tei); /* TODO: Support for extension header, sequence number and N-PDU. * Update the length field if any of them is available. */ } struct gtp_pktinfo { struct sock *sk; union { struct flowi4 fl4; struct flowi6 fl6; }; union { struct rtable *rt; struct rt6_info *rt6; }; struct pdp_ctx *pctx; struct net_device *dev; __u8 tos; __be16 gtph_port; }; static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo) { switch (pktinfo->pctx->gtp_version) { case GTP_V0: pktinfo->gtph_port = htons(GTP0_PORT); gtp0_push_header(skb, pktinfo->pctx); break; case GTP_V1: pktinfo->gtph_port = htons(GTP1U_PORT); gtp1_push_header(skb, pktinfo->pctx); break; } } static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo, struct sock *sk, __u8 tos, struct pdp_ctx *pctx, struct rtable *rt, struct flowi4 *fl4, struct net_device *dev) { pktinfo->sk = sk; pktinfo->tos = tos; pktinfo->pctx = pctx; pktinfo->rt = rt; pktinfo->fl4 = *fl4; pktinfo->dev = dev; } static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo, struct sock *sk, __u8 tos, struct pdp_ctx *pctx, struct rt6_info *rt6, struct flowi6 *fl6, struct net_device *dev) { pktinfo->sk = sk; pktinfo->tos = tos; pktinfo->pctx = pctx; pktinfo->rt6 = rt6; pktinfo->fl6 = *fl6; pktinfo->dev = dev; } static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev, struct gtp_pktinfo *pktinfo, struct pdp_ctx *pctx, __u8 tos, __be16 frag_off) { struct rtable *rt; struct flowi4 fl4; __be16 df; int mtu; rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr, inet_sk(pctx->sk)->inet_saddr); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to SSGN %pI4\n", &pctx->peer.addr.s_addr); dev->stats.tx_carrier_errors++; goto err; } if (rt->dst.dev == dev) { netdev_dbg(dev, "circular route to SSGN %pI4\n", &pctx->peer.addr.s_addr); dev->stats.collisions++; goto err_rt; } /* This is similar to tnl_update_pmtu(). 
*/ df = frag_off; if (df) { mtu = dst_mtu(&rt->dst) - dev->hard_header_len - sizeof(struct iphdr) - sizeof(struct udphdr); switch (pctx->gtp_version) { case GTP_V0: mtu -= sizeof(struct gtp0_header); break; case GTP_V1: mtu -= sizeof(struct gtp1_header); break; } } else { mtu = dst_mtu(&rt->dst); } skb_dst_update_pmtu_no_confirm(skb, mtu); if (frag_off & htons(IP_DF) && ((!skb_is_gso(skb) && skb->len > mtu) || (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); goto err_rt; } gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev); gtp_push_header(skb, pktinfo); return 0; err_rt: ip_rt_put(rt); err: return -EBADMSG; } static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb, struct net_device *dev, struct gtp_pktinfo *pktinfo, struct pdp_ctx *pctx, __u8 tos) { struct dst_entry *dst; struct rt6_info *rt; struct flowi6 fl6; int mtu; rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6, &inet6_sk(pctx->sk)->saddr); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to SSGN %pI6\n", &pctx->peer.addr6); dev->stats.tx_carrier_errors++; goto err; } dst = &rt->dst; if (rt->dst.dev == dev) { netdev_dbg(dev, "circular route to SSGN %pI6\n", &pctx->peer.addr6); dev->stats.collisions++; goto err_rt; } mtu = dst_mtu(&rt->dst) - dev->hard_header_len - sizeof(struct ipv6hdr) - sizeof(struct udphdr); switch (pctx->gtp_version) { case GTP_V0: mtu -= sizeof(struct gtp0_header); break; case GTP_V1: mtu -= sizeof(struct gtp1_header); break; } skb_dst_update_pmtu_no_confirm(skb, mtu); if ((!skb_is_gso(skb) && skb->len > mtu) || (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); goto err_rt; } gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev); gtp_push_header(skb, pktinfo); return 0; err_rt: dst_release(dst); err: return -EBADMSG; } static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, struct gtp_pktinfo *pktinfo) { struct gtp_dev *gtp = netdev_priv(dev); struct net *net = gtp->net; struct pdp_ctx *pctx; struct iphdr *iph; int ret; /* Read the IP destination address and resolve the PDP context. * Prepend PDP header with TEI/TID from PDP ctx. */ iph = ip_hdr(skb); if (gtp->role == GTP_ROLE_SGSN) pctx = ipv4_pdp_find(gtp, iph->saddr); else pctx = ipv4_pdp_find(gtp, iph->daddr); if (!pctx) { netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", &iph->daddr); return -ENOENT; } netdev_dbg(dev, "found PDP context %p\n", pctx); switch (pctx->sk->sk_family) { case AF_INET: ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, iph->tos, iph->frag_off); break; case AF_INET6: ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, iph->tos); break; default: ret = -1; WARN_ON_ONCE(1); break; } if (ret < 0) return ret; netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", &iph->saddr, &iph->daddr); return 0; } static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev, struct gtp_pktinfo *pktinfo) { struct gtp_dev *gtp = netdev_priv(dev); struct net *net = gtp->net; struct pdp_ctx *pctx; struct ipv6hdr *ip6h; __u8 tos; int ret; /* Read the IP destination address and resolve the PDP context. * Prepend PDP header with TEI/TID from PDP ctx. 
*/ ip6h = ipv6_hdr(skb); if (gtp->role == GTP_ROLE_SGSN) pctx = ipv6_pdp_find(gtp, &ip6h->saddr); else pctx = ipv6_pdp_find(gtp, &ip6h->daddr); if (!pctx) { netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n", &ip6h->daddr); return -ENOENT; } netdev_dbg(dev, "found PDP context %p\n", pctx); tos = ipv6_get_dsfield(ip6h); switch (pctx->sk->sk_family) { case AF_INET: ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0); break; case AF_INET6: ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos); break; default: ret = -1; WARN_ON_ONCE(1); break; } if (ret < 0) return ret; netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n", &ip6h->saddr, &ip6h->daddr); return 0; } static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int proto = ntohs(skb->protocol); struct gtp_pktinfo pktinfo; int err; /* Ensure there is sufficient headroom. */ if (skb_cow_head(skb, dev->needed_headroom)) goto tx_err; if (!pskb_inet_may_pull(skb)) goto tx_err; skb_reset_inner_headers(skb); /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */ rcu_read_lock(); switch (proto) { case ETH_P_IP: err = gtp_build_skb_ip4(skb, dev, &pktinfo); break; case ETH_P_IPV6: err = gtp_build_skb_ip6(skb, dev, &pktinfo); break; default: err = -EOPNOTSUPP; break; } rcu_read_unlock(); if (err < 0) goto tx_err; switch (pktinfo.pctx->sk->sk_family) { case AF_INET: udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb, pktinfo.fl4.saddr, pktinfo.fl4.daddr, pktinfo.tos, ip4_dst_hoplimit(&pktinfo.rt->dst), 0, pktinfo.gtph_port, pktinfo.gtph_port, !net_eq(sock_net(pktinfo.pctx->sk), dev_net(dev)), false); break; case AF_INET6: #if IS_ENABLED(CONFIG_IPV6) udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev, &pktinfo.fl6.saddr, &pktinfo.fl6.daddr, pktinfo.tos, ip6_dst_hoplimit(&pktinfo.rt->dst), 0, pktinfo.gtph_port, pktinfo.gtph_port, false); #else goto tx_err; #endif break; } return NETDEV_TX_OK; tx_err: dev->stats.tx_errors++; dev_kfree_skb(skb); return NETDEV_TX_OK; } static const struct net_device_ops gtp_netdev_ops = { .ndo_uninit = gtp_dev_uninit, .ndo_start_xmit = gtp_dev_xmit, }; static const struct device_type gtp_type = { .name = "gtp", }; #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header)) #define GTP_IPV4_MAXLEN (sizeof(struct iphdr) + GTP_TH_MAXLEN) static void gtp_link_setup(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); dev->netdev_ops = &gtp_netdev_ops; dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &gtp_type); dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN; /* Zero header length. 
*/ dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->priv_flags |= IFF_NO_QUEUE; dev->lltx = true; netif_keep_dst(dev); dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN; gtp->dev = dev; } static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]); static void gtp_destructor(struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); kfree(gtp->addr_hash); kfree(gtp->tid_hash); } static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf, const struct nlattr *nla, int family) { udp_conf->family = family; switch (udp_conf->family) { case AF_INET: udp_conf->local_ip.s_addr = nla_get_be32(nla); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: udp_conf->local_ip6 = nla_get_in6_addr(nla); break; #endif default: return -EOPNOTSUPP; } return 0; } static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp, const struct nlattr *nla, int family) { struct udp_tunnel_sock_cfg tuncfg = {}; struct udp_port_cfg udp_conf = {}; struct net *net = gtp->net; struct socket *sock; int err; if (nla) { err = gtp_sock_udp_config(&udp_conf, nla, family); if (err < 0) return ERR_PTR(err); } else { udp_conf.local_ip.s_addr = htonl(INADDR_ANY); udp_conf.family = AF_INET; } if (type == UDP_ENCAP_GTP0) udp_conf.local_udp_port = htons(GTP0_PORT); else if (type == UDP_ENCAP_GTP1U) udp_conf.local_udp_port = htons(GTP1U_PORT); else return ERR_PTR(-EINVAL); err = udp_sock_create(net, &udp_conf, &sock); if (err) return ERR_PTR(err); tuncfg.sk_user_data = gtp; tuncfg.encap_type = type; tuncfg.encap_rcv = gtp_encap_recv; tuncfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tuncfg); return sock->sk; } static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla, int family) { struct sock *sk1u; struct sock *sk0; sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family); if (IS_ERR(sk0)) return PTR_ERR(sk0); sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family); if (IS_ERR(sk1u)) { udp_tunnel_sock_release(sk0->sk_socket); return PTR_ERR(sk1u); } gtp->sk_created = true; gtp->sk0 = sk0; gtp->sk1u = sk1u; return 0; } #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header)) #define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN) static int gtp_newlink(struct net_device *dev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct net *link_net = rtnl_newlink_link_net(params); struct nlattr **data = params->data; unsigned int role = GTP_ROLE_GGSN; struct gtp_dev *gtp; struct gtp_net *gn; int hashsize, err; #if !IS_ENABLED(CONFIG_IPV6) if (data[IFLA_GTP_LOCAL6]) return -EAFNOSUPPORT; #endif gtp = netdev_priv(dev); if (!data[IFLA_GTP_PDP_HASHSIZE]) { hashsize = 1024; } else { hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]); if (!hashsize) hashsize = 1024; } if (data[IFLA_GTP_ROLE]) { role = nla_get_u32(data[IFLA_GTP_ROLE]); if (role > GTP_ROLE_SGSN) return -EINVAL; } gtp->role = role; gtp->restart_count = nla_get_u8_default(data[IFLA_GTP_RESTART_COUNT], 0); gtp->net = link_net; err = gtp_hashtable_new(gtp, hashsize); if (err < 0) return err; if (data[IFLA_GTP_CREATE_SOCKETS]) { if (data[IFLA_GTP_LOCAL6]) err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6); else err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET); } else { err = gtp_encap_enable(gtp, data); } if (err < 0) goto out_hashtable; if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) || (gtp->sk1u && 
gtp->sk1u->sk_family == AF_INET6)) { dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN; dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN; } err = register_netdevice(dev); if (err < 0) { netdev_dbg(dev, "failed to register new netdev %d\n", err); goto out_encap; } gn = net_generic(link_net, gtp_net_id); list_add(&gtp->list, &gn->gtp_dev_list); dev->priv_destructor = gtp_destructor; netdev_dbg(dev, "registered new GTP interface\n"); return 0; out_encap: gtp_encap_disable(gtp); out_hashtable: kfree(gtp->addr_hash); kfree(gtp->tid_hash); return err; } static void gtp_dellink(struct net_device *dev, struct list_head *head) { struct gtp_dev *gtp = netdev_priv(dev); struct hlist_node *next; struct pdp_ctx *pctx; int i; for (i = 0; i < gtp->hash_size; i++) hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid) pdp_context_delete(pctx); list_del(&gtp->list); unregister_netdevice_queue(dev, head); } static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { [IFLA_GTP_FD0] = { .type = NLA_U32 }, [IFLA_GTP_FD1] = { .type = NLA_U32 }, [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 }, [IFLA_GTP_ROLE] = { .type = NLA_U32 }, [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 }, [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 }, [IFLA_GTP_LOCAL] = { .type = NLA_U32 }, [IFLA_GTP_LOCAL6] = { .len = sizeof(struct in6_addr) }, }; static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (!data) return -EINVAL; return 0; } static size_t gtp_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */ nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */ nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */ } static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct gtp_dev *gtp = netdev_priv(dev); if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops gtp_link_ops __read_mostly = { .kind = "gtp", .maxtype = IFLA_GTP_MAX, .policy = gtp_policy, .priv_size = sizeof(struct gtp_dev), .setup = gtp_link_setup, .validate = gtp_validate, .newlink = gtp_newlink, .dellink = gtp_dellink, .get_size = gtp_get_size, .fill_info = gtp_fill_info, }; static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize) { int i; gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head), GFP_KERNEL | __GFP_NOWARN); if (gtp->addr_hash == NULL) return -ENOMEM; gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head), GFP_KERNEL | __GFP_NOWARN); if (gtp->tid_hash == NULL) goto err1; gtp->hash_size = hsize; for (i = 0; i < hsize; i++) { INIT_HLIST_HEAD(&gtp->addr_hash[i]); INIT_HLIST_HEAD(&gtp->tid_hash[i]); } return 0; err1: kfree(gtp->addr_hash); return -ENOMEM; } static struct sock *gtp_encap_enable_socket(int fd, int type, struct gtp_dev *gtp) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct socket *sock; struct sock *sk; int err; pr_debug("enable gtp on %d, %d\n", fd, type); sock = sockfd_lookup(fd, &err); if (!sock) { pr_debug("gtp socket fd=%d not found\n", fd); return ERR_PTR(err); } sk = sock->sk; if (sk->sk_protocol != IPPROTO_UDP || sk->sk_type != SOCK_DGRAM || (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { pr_debug("socket fd=%d not UDP\n", fd); sk = ERR_PTR(-EINVAL); goto out_sock; } if (sk->sk_family == AF_INET6 && 
!sk->sk_ipv6only) { sk = ERR_PTR(-EADDRNOTAVAIL); goto out_sock; } lock_sock(sk); if (sk->sk_user_data) { sk = ERR_PTR(-EBUSY); goto out_rel_sock; } sock_hold(sk); tuncfg.sk_user_data = gtp; tuncfg.encap_type = type; tuncfg.encap_rcv = gtp_encap_recv; tuncfg.encap_destroy = gtp_encap_destroy; setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg); out_rel_sock: release_sock(sock->sk); out_sock: sockfd_put(sock); return sk; } static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) { struct sock *sk1u = NULL; struct sock *sk0 = NULL; if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1]) return -EINVAL; if (data[IFLA_GTP_FD0]) { int fd0 = nla_get_u32(data[IFLA_GTP_FD0]); if (fd0 >= 0) { sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp); if (IS_ERR(sk0)) return PTR_ERR(sk0); } } if (data[IFLA_GTP_FD1]) { int fd1 = nla_get_u32(data[IFLA_GTP_FD1]); if (fd1 >= 0) { sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp); if (IS_ERR(sk1u)) { gtp_encap_disable_sock(sk0); return PTR_ERR(sk1u); } } } gtp->sk0 = sk0; gtp->sk1u = sk1u; if (sk0 && sk1u && sk0->sk_family != sk1u->sk_family) { gtp_encap_disable_sock(sk0); gtp_encap_disable_sock(sk1u); return -EINVAL; } return 0; } static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) { struct gtp_dev *gtp = NULL; struct net_device *dev; struct net *net; /* Examine the link attributes and figure out which network namespace * we are talking about. */ if (nla[GTPA_NET_NS_FD]) net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD])); else net = get_net(src_net); if (IS_ERR(net)) return NULL; /* Check if there's an existing gtpX device to configure */ dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK])); if (dev && dev->netdev_ops == &gtp_netdev_ops) gtp = netdev_priv(dev); put_net(net); return gtp; } static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) { pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); switch (pctx->gtp_version) { case GTP_V0: /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow * label needs to be the same for uplink and downlink packets, * so let's annotate this. 
*/ pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]); pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]); break; case GTP_V1: pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]); pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]); break; default: break; } } static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info) { if (info->attrs[GTPA_PEER_ADDRESS]) { pctx->peer.addr.s_addr = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); } else if (info->attrs[GTPA_PEER_ADDR6]) { pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]); } } static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) { ip_pdp_peer_fill(pctx, info); pctx->ms.addr.s_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); gtp_pdp_fill(pctx, info); } static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) { ip_pdp_peer_fill(pctx, info); pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]); if (pctx->ms.addr6.s6_addr32[2] || pctx->ms.addr6.s6_addr32[3]) return false; gtp_pdp_fill(pctx, info); return true; } static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, struct genl_info *info) { struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; u32 hash_ms, hash_tid = 0; struct in6_addr ms_addr6; unsigned int version; bool found = false; __be32 ms_addr; int family; version = nla_get_u32(info->attrs[GTPA_VERSION]); family = nla_get_u8_default(info->attrs[GTPA_FAMILY], AF_INET); #if !IS_ENABLED(CONFIG_IPV6) if (family == AF_INET6) return ERR_PTR(-EAFNOSUPPORT); #endif if (!info->attrs[GTPA_PEER_ADDRESS] && !info->attrs[GTPA_PEER_ADDR6]) return ERR_PTR(-EINVAL); if ((info->attrs[GTPA_PEER_ADDRESS] && sk->sk_family == AF_INET6) || (info->attrs[GTPA_PEER_ADDR6] && sk->sk_family == AF_INET)) return ERR_PTR(-EAFNOSUPPORT); switch (family) { case AF_INET: if (!info->attrs[GTPA_MS_ADDRESS] || info->attrs[GTPA_MS_ADDR6]) return ERR_PTR(-EINVAL); ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; pctx = ipv4_pdp_find(gtp, ms_addr); break; case AF_INET6: if (!info->attrs[GTPA_MS_ADDR6] || info->attrs[GTPA_MS_ADDRESS]) return ERR_PTR(-EINVAL); ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]); hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size; pctx = ipv6_pdp_find(gtp, &ms_addr6); break; default: return ERR_PTR(-EAFNOSUPPORT); } if (pctx) found = true; if (version == GTP_V0) pctx_tid = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]), family); else if (version == GTP_V1) pctx_tid = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI]), family); if (pctx_tid) found = true; if (found) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) return ERR_PTR(-EEXIST); if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE) return ERR_PTR(-EOPNOTSUPP); if (pctx && pctx_tid) return ERR_PTR(-EEXIST); if (!pctx) pctx = pctx_tid; switch (pctx->af) { case AF_INET: ipv4_pdp_fill(pctx, info); break; case AF_INET6: if (!ipv6_pdp_fill(pctx, info)) return ERR_PTR(-EADDRNOTAVAIL); break; } if (pctx->gtp_version == GTP_V0) netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n", pctx->u.v0.tid, pctx); else if (pctx->gtp_version == GTP_V1) netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); return pctx; } pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC); if (pctx == NULL) return ERR_PTR(-ENOMEM); sock_hold(sk); pctx->sk = sk; pctx->dev = gtp->dev; pctx->af = family; switch (pctx->af) { case AF_INET: if (!info->attrs[GTPA_MS_ADDRESS]) { sock_put(sk); 
kfree(pctx); return ERR_PTR(-EINVAL); } ipv4_pdp_fill(pctx, info); break; case AF_INET6: if (!info->attrs[GTPA_MS_ADDR6]) { sock_put(sk); kfree(pctx); return ERR_PTR(-EINVAL); } if (!ipv6_pdp_fill(pctx, info)) { sock_put(sk); kfree(pctx); return ERR_PTR(-EADDRNOTAVAIL); } break; } atomic_set(&pctx->tx_seq, 0); switch (pctx->gtp_version) { case GTP_V0: /* TS 09.60: "The flow label identifies unambiguously a GTP * flow.". We use the tid for this instead, I cannot find a * situation in which this doesn't unambiguosly identify the * PDP context. */ hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size; break; case GTP_V1: hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size; break; } hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]); hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]); switch (pctx->gtp_version) { case GTP_V0: netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n", pctx->u.v0.tid, &pctx->peer.addr, &pctx->ms.addr, pctx); break; case GTP_V1: netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, &pctx->peer.addr, &pctx->ms.addr, pctx); break; } return pctx; } static void pdp_context_free(struct rcu_head *head) { struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head); sock_put(pctx->sk); kfree(pctx); } static void pdp_context_delete(struct pdp_ctx *pctx) { hlist_del_rcu(&pctx->hlist_tid); hlist_del_rcu(&pctx->hlist_addr); call_rcu(&pctx->rcu_head, pdp_context_free); } static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation); static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) { unsigned int version; struct pdp_ctx *pctx; struct gtp_dev *gtp; struct sock *sk; int err; if (!info->attrs[GTPA_VERSION] || !info->attrs[GTPA_LINK]) return -EINVAL; version = nla_get_u32(info->attrs[GTPA_VERSION]); switch (version) { case GTP_V0: if (!info->attrs[GTPA_TID] || !info->attrs[GTPA_FLOW]) return -EINVAL; break; case GTP_V1: if (!info->attrs[GTPA_I_TEI] || !info->attrs[GTPA_O_TEI]) return -EINVAL; break; default: return -EINVAL; } rtnl_lock(); gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); if (!gtp) { err = -ENODEV; goto out_unlock; } if (version == GTP_V0) sk = gtp->sk0; else if (version == GTP_V1) sk = gtp->sk1u; else sk = NULL; if (!sk) { err = -ENODEV; goto out_unlock; } pctx = gtp_pdp_add(gtp, sk, info); if (IS_ERR(pctx)) { err = PTR_ERR(pctx); } else { gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL); err = 0; } out_unlock: rtnl_unlock(); return err; } static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net, struct nlattr *nla[]) { struct gtp_dev *gtp; int family; family = nla_get_u8_default(nla[GTPA_FAMILY], AF_INET); gtp = gtp_find_dev(net, nla); if (!gtp) return ERR_PTR(-ENODEV); if (nla[GTPA_MS_ADDRESS]) { __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]); if (family != AF_INET) return ERR_PTR(-EINVAL); return ipv4_pdp_find(gtp, ip); } else if (nla[GTPA_MS_ADDR6]) { struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]); if (family != AF_INET6) return ERR_PTR(-EINVAL); if (addr.s6_addr32[2] || addr.s6_addr32[3]) return ERR_PTR(-EADDRNOTAVAIL); return ipv6_pdp_find(gtp, &addr); } else if (nla[GTPA_VERSION]) { u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]); if (gtp_version == GTP_V0 && nla[GTPA_TID]) { return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]), family); } else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) { return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]), family); } } return 
ERR_PTR(-EINVAL); } static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[]) { struct pdp_ctx *pctx; if (nla[GTPA_LINK]) pctx = gtp_find_pdp_by_link(net, nla); else pctx = ERR_PTR(-EINVAL); if (!pctx) pctx = ERR_PTR(-ENOENT); return pctx; } static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info) { struct pdp_ctx *pctx; int err = 0; if (!info->attrs[GTPA_VERSION]) return -EINVAL; rcu_read_lock(); pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); if (IS_ERR(pctx)) { err = PTR_ERR(pctx); goto out_unlock; } if (pctx->gtp_version == GTP_V0) netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n", pctx->u.v0.tid, pctx); else if (pctx->gtp_version == GTP_V1) netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx); gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC); pdp_context_delete(pctx); out_unlock: rcu_read_unlock(); return err; } static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, int flags, u32 type, struct pdp_ctx *pctx) { void *genlh; genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags, type); if (genlh == NULL) goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_u8(skb, GTPA_FAMILY, pctx->af)) goto nla_put_failure; switch (pctx->af) { case AF_INET: if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr)) goto nla_put_failure; break; case AF_INET6: if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6)) goto nla_put_failure; break; } switch (pctx->sk->sk_family) { case AF_INET: if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr)) goto nla_put_failure; break; case AF_INET6: if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6)) goto nla_put_failure; break; } switch (pctx->gtp_version) { case GTP_V0: if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) || nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow)) goto nla_put_failure; break; case GTP_V1: if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) || nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei)) goto nla_put_failure; break; } genlmsg_end(skb, genlh); return 0; nlmsg_failure: nla_put_failure: genlmsg_cancel(skb, genlh); return -EMSGSIZE; } static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation) { struct sk_buff *msg; int ret; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation); if (!msg) return -ENOMEM; ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx); if (ret < 0) { nlmsg_free(msg); return ret; } ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); return ret; } static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info) { struct pdp_ctx *pctx = NULL; struct sk_buff *skb2; int err; if (!info->attrs[GTPA_VERSION]) return -EINVAL; rcu_read_lock(); pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs); if (IS_ERR(pctx)) { err = PTR_ERR(pctx); goto err_unlock; } skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (skb2 == NULL) { err = -ENOMEM; goto err_unlock; } err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, 0, info->nlhdr->nlmsg_type, pctx); if (err < 0) goto err_unlock_free; rcu_read_unlock(); return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid); err_unlock_free: kfree_skb(skb2); err_unlock: rcu_read_unlock(); return err; } static int gtp_genl_dump_pdp(struct sk_buff *skb, struct netlink_callback *cb) { struct gtp_dev *last_gtp = (struct gtp_dev 
*)cb->args[2], *gtp; int i, j, bucket = cb->args[0], skip = cb->args[1]; struct net *net = sock_net(skb->sk); struct net_device *dev; struct pdp_ctx *pctx; if (cb->args[4]) return 0; rcu_read_lock(); for_each_netdev_rcu(net, dev) { if (dev->rtnl_link_ops != &gtp_link_ops) continue; gtp = netdev_priv(dev); if (last_gtp && last_gtp != gtp) continue; else last_gtp = NULL; for (i = bucket; i < gtp->hash_size; i++) { j = 0; hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) { if (j >= skip && gtp_genl_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh->nlmsg_type, pctx)) { cb->args[0] = i; cb->args[1] = j; cb->args[2] = (unsigned long)gtp; goto out; } j++; } skip = 0; } bucket = 0; } cb->args[4] = 1; out: rcu_read_unlock(); return skb->len; } static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *skb_to_send; __be32 src_ip, dst_ip; unsigned int version; struct gtp_dev *gtp; struct flowi4 fl4; struct rtable *rt; struct sock *sk; __be16 port; int len; if (!info->attrs[GTPA_VERSION] || !info->attrs[GTPA_LINK] || !info->attrs[GTPA_PEER_ADDRESS] || !info->attrs[GTPA_MS_ADDRESS]) return -EINVAL; version = nla_get_u32(info->attrs[GTPA_VERSION]); dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); gtp = gtp_find_dev(sock_net(skb->sk), info->attrs); if (!gtp) return -ENODEV; if (!gtp->sk_created) return -EOPNOTSUPP; if (!(gtp->dev->flags & IFF_UP)) return -ENETDOWN; if (version == GTP_V0) { struct gtp0_header *gtp0_h; len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) + sizeof(struct iphdr) + sizeof(struct udphdr); skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len); if (!skb_to_send) return -ENOMEM; sk = gtp->sk0; port = htons(GTP0_PORT); gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header)); memset(gtp0_h, 0, sizeof(struct gtp0_header)); gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ); } else if (version == GTP_V1) { struct gtp1_header_long *gtp1u_h; len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp1_header_long) + sizeof(struct iphdr) + sizeof(struct udphdr); skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len); if (!skb_to_send) return -ENOMEM; sk = gtp->sk1u; port = htons(GTP1U_PORT); gtp1u_h = skb_push(skb_to_send, sizeof(struct gtp1_header_long)); memset(gtp1u_h, 0, sizeof(struct gtp1_header_long)); gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ); } else { return -ENODEV; } rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip); if (IS_ERR(rt)) { netdev_dbg(gtp->dev, "no route for echo request to %pI4\n", &dst_ip); kfree_skb(skb_to_send); return -ENODEV; } udp_tunnel_xmit_skb(rt, sk, skb_to_send, fl4.saddr, fl4.daddr, fl4.flowi4_tos, ip4_dst_hoplimit(&rt->dst), 0, port, port, !net_eq(sock_net(sk), dev_net(gtp->dev)), false); return 0; } static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_LINK] = { .type = NLA_U32, }, [GTPA_VERSION] = { .type = NLA_U32, }, [GTPA_TID] = { .type = NLA_U64, }, [GTPA_PEER_ADDRESS] = { .type = NLA_U32, }, [GTPA_MS_ADDRESS] = { .type = NLA_U32, }, [GTPA_FLOW] = { .type = NLA_U16, }, [GTPA_NET_NS_FD] = { .type = NLA_U32, }, [GTPA_I_TEI] = { .type = NLA_U32, }, [GTPA_O_TEI] = { .type = NLA_U32, }, [GTPA_PEER_ADDR6] = { .len = sizeof(struct in6_addr), }, [GTPA_MS_ADDR6] = { .len = sizeof(struct in6_addr), }, [GTPA_FAMILY] = { .type = NLA_U8, }, }; static const struct genl_small_ops gtp_genl_ops[] = { { .cmd = GTP_CMD_NEWPDP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = 
gtp_genl_new_pdp, .flags = GENL_ADMIN_PERM, }, { .cmd = GTP_CMD_DELPDP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_del_pdp, .flags = GENL_ADMIN_PERM, }, { .cmd = GTP_CMD_GETPDP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_get_pdp, .dumpit = gtp_genl_dump_pdp, .flags = GENL_ADMIN_PERM, }, { .cmd = GTP_CMD_ECHOREQ, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = gtp_genl_send_echo_req, .flags = GENL_ADMIN_PERM, }, }; static struct genl_family gtp_genl_family __ro_after_init = { .name = "gtp", .version = 0, .hdrsize = 0, .maxattr = GTPA_MAX, .policy = gtp_genl_policy, .netnsok = true, .module = THIS_MODULE, .small_ops = gtp_genl_ops, .n_small_ops = ARRAY_SIZE(gtp_genl_ops), .resv_start_op = GTP_CMD_ECHOREQ + 1, .mcgrps = gtp_genl_mcgrps, .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps), }; static int __net_init gtp_net_init(struct net *net) { struct gtp_net *gn = net_generic(net, gtp_net_id); INIT_LIST_HEAD(&gn->gtp_dev_list); return 0; } static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, struct list_head *dev_to_kill) { struct net *net; list_for_each_entry(net, net_list, exit_list) { struct gtp_net *gn = net_generic(net, gtp_net_id); struct gtp_dev *gtp, *gtp_next; list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list) gtp_dellink(gtp->dev, dev_to_kill); } } static struct pernet_operations gtp_net_ops = { .init = gtp_net_init, .exit_batch_rtnl = gtp_net_exit_batch_rtnl, .id = &gtp_net_id, .size = sizeof(struct gtp_net), }; static int __init gtp_init(void) { int err; get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval)); err = register_pernet_subsys(&gtp_net_ops); if (err < 0) goto error_out; err = rtnl_link_register(&gtp_link_ops); if (err < 0) goto unreg_pernet_subsys; err = genl_register_family(&gtp_genl_family); if (err < 0) goto unreg_rtnl_link; pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", sizeof(struct pdp_ctx)); return 0; unreg_rtnl_link: rtnl_link_unregister(&gtp_link_ops); unreg_pernet_subsys: unregister_pernet_subsys(&gtp_net_ops); error_out: pr_err("error loading GTP module loaded\n"); return err; } late_initcall(gtp_init); static void __exit gtp_fini(void) { genl_unregister_family(&gtp_genl_family); rtnl_link_unregister(&gtp_link_ops); unregister_pernet_subsys(&gtp_net_ops); pr_info("GTP module unloaded\n"); } module_exit(gtp_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>"); MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); MODULE_ALIAS_RTNL_LINK("gtp"); MODULE_ALIAS_GENL_FAMILY("gtp");
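/*
 * Illustrative sketch (not part of the driver): the extension-header walk
 * in gtp_parse_exthdrs() above relies on two properties from 3GPP TS
 * 29.060: every extension header starts with a length byte counted in
 * 4-byte units, and its last byte carries the "next extension header
 * type", where 0 terminates the chain.  The stand-alone user-space sketch
 * below mirrors that loop on a flat byte buffer; walk_exthdrs() and the
 * sample buffer are hypothetical names made up for this example.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Return the total header length consumed, or -1 on a malformed chain. */
static int walk_exthdrs(const uint8_t *pkt, size_t pktlen, size_t offset)
{
	uint8_t next_type;

	do {
		uint8_t len;

		if (offset >= pktlen)
			return -1;
		len = pkt[offset];		/* length in 4-byte units */
		if (!len)
			return -1;
		offset += len * 4;
		if (offset > pktlen)
			return -1;
		next_type = pkt[offset - 1];	/* last byte of this extension */
	} while (next_type != 0);

	return (int)offset;
}

int main(void)
{
	/* one 4-byte extension: len = 1, two payload bytes, next type = 0 */
	const uint8_t ext[] = { 0x01, 0xaa, 0xbb, 0x00 };

	printf("extension headers end at offset %d\n",
	       walk_exthdrs(ext, sizeof(ext), 0));
	return 0;
}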
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netfilter.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> #include
<net/ip.h> #include <net/ip6_route.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_flow_table.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_tuple.h> static DEFINE_MUTEX(flowtable_lock); static LIST_HEAD(flowtables); static void flow_offload_fill_dir(struct flow_offload *flow, enum flow_offload_tuple_dir dir) { struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple; ft->dir = dir; switch (ctt->src.l3num) { case NFPROTO_IPV4: ft->src_v4 = ctt->src.u3.in; ft->dst_v4 = ctt->dst.u3.in; break; case NFPROTO_IPV6: ft->src_v6 = ctt->src.u3.in6; ft->dst_v6 = ctt->dst.u3.in6; break; } ft->l3proto = ctt->src.l3num; ft->l4proto = ctt->dst.protonum; switch (ctt->dst.protonum) { case IPPROTO_TCP: case IPPROTO_UDP: ft->src_port = ctt->src.u.tcp.port; ft->dst_port = ctt->dst.u.tcp.port; break; } } struct flow_offload *flow_offload_alloc(struct nf_conn *ct) { struct flow_offload *flow; if (unlikely(nf_ct_is_dying(ct))) return NULL; flow = kzalloc(sizeof(*flow), GFP_ATOMIC); if (!flow) return NULL; refcount_inc(&ct->ct_general.use); flow->ct = ct; flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL); flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY); if (ct->status & IPS_SRC_NAT) __set_bit(NF_FLOW_SNAT, &flow->flags); if (ct->status & IPS_DST_NAT) __set_bit(NF_FLOW_DNAT, &flow->flags); return flow; } EXPORT_SYMBOL_GPL(flow_offload_alloc); static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple) { if (flow_tuple->l3proto == NFPROTO_IPV6) return rt6_get_cookie(dst_rt6_info(flow_tuple->dst_cache)); return 0; } static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route, enum flow_offload_tuple_dir dir) { struct dst_entry *dst = route->tuple[dir].dst; route->tuple[dir].dst = NULL; return dst; } static int flow_offload_fill_route(struct flow_offload *flow, struct nf_flow_route *route, enum flow_offload_tuple_dir dir) { struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple; struct dst_entry *dst = nft_route_dst_fetch(route, dir); int i, j = 0; switch (flow_tuple->l3proto) { case NFPROTO_IPV4: flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true); break; case NFPROTO_IPV6: flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true); break; } flow_tuple->iifidx = route->tuple[dir].in.ifindex; for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) { flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id; flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto; if (route->tuple[dir].in.ingress_vlans & BIT(i)) flow_tuple->in_vlan_ingress |= BIT(j); j++; } flow_tuple->encap_num = route->tuple[dir].in.num_encaps; switch (route->tuple[dir].xmit_type) { case FLOW_OFFLOAD_XMIT_DIRECT: memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest, ETH_ALEN); memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source, ETH_ALEN); flow_tuple->out.ifidx = route->tuple[dir].out.ifindex; flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex; dst_release(dst); break; case FLOW_OFFLOAD_XMIT_XFRM: case FLOW_OFFLOAD_XMIT_NEIGH: flow_tuple->dst_cache = dst; flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple); break; default: WARN_ON_ONCE(1); break; } flow_tuple->xmit_type = route->tuple[dir].xmit_type; return 0; } static void nft_flow_dst_release(struct flow_offload *flow, enum flow_offload_tuple_dir dir) { if 
(flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH || flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) dst_release(flow->tuplehash[dir].tuple.dst_cache); } void flow_offload_route_init(struct flow_offload *flow, struct nf_flow_route *route) { flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL); flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY); flow->type = NF_FLOW_OFFLOAD_ROUTE; } EXPORT_SYMBOL_GPL(flow_offload_route_init); static inline bool nf_flow_has_expired(const struct flow_offload *flow) { return nf_flow_timeout_delta(flow->timeout) <= 0; } static void flow_offload_fixup_tcp(struct nf_conn *ct, u8 tcp_state) { struct ip_ct_tcp *tcp = &ct->proto.tcp; spin_lock_bh(&ct->lock); if (tcp->state != tcp_state) tcp->state = tcp_state; /* syn packet triggers the TCP reopen case from conntrack. */ if (tcp->state == TCP_CONNTRACK_CLOSE) ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; /* Conntrack state is outdated due to offload bypass. * Clear IP_CT_TCP_FLAG_MAXACK_SET, otherwise conntracks * TCP reset validation will fail. */ tcp->seen[0].td_maxwin = 0; tcp->seen[0].flags &= ~IP_CT_TCP_FLAG_MAXACK_SET; tcp->seen[1].td_maxwin = 0; tcp->seen[1].flags &= ~IP_CT_TCP_FLAG_MAXACK_SET; spin_unlock_bh(&ct->lock); } static void flow_offload_fixup_ct(struct flow_offload *flow) { struct nf_conn *ct = flow->ct; struct net *net = nf_ct_net(ct); int l4num = nf_ct_protonum(ct); bool expired, closing = false; u32 offload_timeout = 0; s32 timeout; if (l4num == IPPROTO_TCP) { const struct nf_tcp_net *tn = nf_tcp_pernet(net); u8 tcp_state; /* Enter CLOSE state if fin/rst packet has been seen, this * allows TCP reopen from conntrack. Otherwise, pick up from * the last seen TCP state. */ closing = test_bit(NF_FLOW_CLOSING, &flow->flags); if (closing) { flow_offload_fixup_tcp(ct, TCP_CONNTRACK_CLOSE); timeout = READ_ONCE(tn->timeouts[TCP_CONNTRACK_CLOSE]); expired = false; } else { tcp_state = READ_ONCE(ct->proto.tcp.state); flow_offload_fixup_tcp(ct, tcp_state); timeout = READ_ONCE(tn->timeouts[tcp_state]); expired = nf_flow_has_expired(flow); } offload_timeout = READ_ONCE(tn->offload_timeout); } else if (l4num == IPPROTO_UDP) { const struct nf_udp_net *tn = nf_udp_pernet(net); enum udp_conntrack state = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ? 
UDP_CT_REPLIED : UDP_CT_UNREPLIED; timeout = READ_ONCE(tn->timeouts[state]); expired = nf_flow_has_expired(flow); offload_timeout = READ_ONCE(tn->offload_timeout); } else { return; } if (expired) timeout -= offload_timeout; if (timeout < 0) timeout = 0; if (closing || nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout) nf_ct_refresh(ct, timeout); } static void flow_offload_route_release(struct flow_offload *flow) { nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL); nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY); } void flow_offload_free(struct flow_offload *flow) { switch (flow->type) { case NF_FLOW_OFFLOAD_ROUTE: flow_offload_route_release(flow); break; default: break; } nf_ct_put(flow->ct); kfree_rcu(flow, rcu_head); } EXPORT_SYMBOL_GPL(flow_offload_free); static u32 flow_offload_hash(const void *data, u32 len, u32 seed) { const struct flow_offload_tuple *tuple = data; return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed); } static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed) { const struct flow_offload_tuple_rhash *tuplehash = data; return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed); } static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg, const void *ptr) { const struct flow_offload_tuple *tuple = arg->key; const struct flow_offload_tuple_rhash *x = ptr; if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash))) return 1; return 0; } static const struct rhashtable_params nf_flow_offload_rhash_params = { .head_offset = offsetof(struct flow_offload_tuple_rhash, node), .hashfn = flow_offload_hash, .obj_hashfn = flow_offload_hash_obj, .obj_cmpfn = flow_offload_hash_cmp, .automatic_shrinking = true, }; unsigned long flow_offload_get_timeout(struct flow_offload *flow) { unsigned long timeout = NF_FLOW_TIMEOUT; struct net *net = nf_ct_net(flow->ct); int l4num = nf_ct_protonum(flow->ct); if (l4num == IPPROTO_TCP) { struct nf_tcp_net *tn = nf_tcp_pernet(net); timeout = tn->offload_timeout; } else if (l4num == IPPROTO_UDP) { struct nf_udp_net *tn = nf_udp_pernet(net); timeout = tn->offload_timeout; } return timeout; } int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) { int err; flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow); err = rhashtable_insert_fast(&flow_table->rhashtable, &flow->tuplehash[0].node, nf_flow_offload_rhash_params); if (err < 0) return err; err = rhashtable_insert_fast(&flow_table->rhashtable, &flow->tuplehash[1].node, nf_flow_offload_rhash_params); if (err < 0) { rhashtable_remove_fast(&flow_table->rhashtable, &flow->tuplehash[0].node, nf_flow_offload_rhash_params); return err; } nf_ct_refresh(flow->ct, NF_CT_DAY); if (nf_flowtable_hw_offload(flow_table)) { __set_bit(NF_FLOW_HW, &flow->flags); nf_flow_offload_add(flow_table, flow); } return 0; } EXPORT_SYMBOL_GPL(flow_offload_add); void flow_offload_refresh(struct nf_flowtable *flow_table, struct flow_offload *flow, bool force) { u32 timeout; timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow); if (force || timeout - READ_ONCE(flow->timeout) > HZ) WRITE_ONCE(flow->timeout, timeout); else return; if (likely(!nf_flowtable_hw_offload(flow_table)) || test_bit(NF_FLOW_CLOSING, &flow->flags)) return; nf_flow_offload_add(flow_table, flow); } EXPORT_SYMBOL_GPL(flow_offload_refresh); static void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow) { rhashtable_remove_fast(&flow_table->rhashtable, 
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, nf_flow_offload_rhash_params); rhashtable_remove_fast(&flow_table->rhashtable, &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node, nf_flow_offload_rhash_params); flow_offload_free(flow); } void flow_offload_teardown(struct flow_offload *flow) { clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status); if (!test_and_set_bit(NF_FLOW_TEARDOWN, &flow->flags)) flow_offload_fixup_ct(flow); } EXPORT_SYMBOL_GPL(flow_offload_teardown); struct flow_offload_tuple_rhash * flow_offload_lookup(struct nf_flowtable *flow_table, struct flow_offload_tuple *tuple) { struct flow_offload_tuple_rhash *tuplehash; struct flow_offload *flow; int dir; tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple, nf_flow_offload_rhash_params); if (!tuplehash) return NULL; dir = tuplehash->tuple.dir; flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) return NULL; if (unlikely(nf_ct_is_dying(flow->ct))) return NULL; return tuplehash; } EXPORT_SYMBOL_GPL(flow_offload_lookup); static int nf_flow_table_iterate(struct nf_flowtable *flow_table, void (*iter)(struct nf_flowtable *flowtable, struct flow_offload *flow, void *data), void *data) { struct flow_offload_tuple_rhash *tuplehash; struct rhashtable_iter hti; struct flow_offload *flow; int err = 0; rhashtable_walk_enter(&flow_table->rhashtable, &hti); rhashtable_walk_start(&hti); while ((tuplehash = rhashtable_walk_next(&hti))) { if (IS_ERR(tuplehash)) { if (PTR_ERR(tuplehash) != -EAGAIN) { err = PTR_ERR(tuplehash); break; } continue; } if (tuplehash->tuple.dir) continue; flow = container_of(tuplehash, struct flow_offload, tuplehash[0]); iter(flow_table, flow, data); } rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); return err; } static bool nf_flow_custom_gc(struct nf_flowtable *flow_table, const struct flow_offload *flow) { return flow_table->type->gc && flow_table->type->gc(flow); } /** * nf_flow_table_tcp_timeout() - new timeout of offloaded tcp entry * @ct: Flowtable offloaded tcp ct * * Return: number of seconds when ct entry should expire. */ static u32 nf_flow_table_tcp_timeout(const struct nf_conn *ct) { u8 state = READ_ONCE(ct->proto.tcp.state); switch (state) { case TCP_CONNTRACK_SYN_SENT: case TCP_CONNTRACK_SYN_RECV: return 0; case TCP_CONNTRACK_ESTABLISHED: return NF_CT_DAY; case TCP_CONNTRACK_FIN_WAIT: case TCP_CONNTRACK_CLOSE_WAIT: case TCP_CONNTRACK_LAST_ACK: case TCP_CONNTRACK_TIME_WAIT: return 5 * 60 * HZ; case TCP_CONNTRACK_CLOSE: return 0; } return 0; } /** * nf_flow_table_extend_ct_timeout() - Extend ct timeout of offloaded conntrack entry * @ct: Flowtable offloaded ct * * Datapath lookups in the conntrack table will evict nf_conn entries * if they have expired. * * Once nf_conn entries have been offloaded, nf_conntrack might not see any * packets anymore. Thus ct->timeout is no longer refreshed and ct can * be evicted. * * To avoid the need for an additional check on the offload bit for every * packet processed via nf_conntrack_in(), set an arbitrary timeout large * enough not to ever expire, this save us a check for the IPS_OFFLOAD_BIT * from the packet path via nf_ct_is_expired(). */ static void nf_flow_table_extend_ct_timeout(struct nf_conn *ct) { static const u32 min_timeout = 5 * 60 * HZ; u32 expires = nf_ct_expires(ct); /* normal case: large enough timeout, nothing to do. */ if (likely(expires >= min_timeout)) return; /* must check offload bit after this, we do not hold any locks. 
* flowtable and ct entries could have been removed on another CPU. */ if (!refcount_inc_not_zero(&ct->ct_general.use)) return; /* load ct->status after refcount increase */ smp_acquire__after_ctrl_dep(); if (nf_ct_is_confirmed(ct) && test_bit(IPS_OFFLOAD_BIT, &ct->status)) { u8 l4proto = nf_ct_protonum(ct); u32 new_timeout = true; switch (l4proto) { case IPPROTO_UDP: new_timeout = NF_CT_DAY; break; case IPPROTO_TCP: new_timeout = nf_flow_table_tcp_timeout(ct); break; default: WARN_ON_ONCE(1); break; } /* Update to ct->timeout from nf_conntrack happens * without holding ct->lock. * * Use cmpxchg to ensure timeout extension doesn't * happen when we race with conntrack datapath. * * The inverse -- datapath updating ->timeout right * after this -- is fine, datapath is authoritative. */ if (new_timeout) { new_timeout += nfct_time_stamp; cmpxchg(&ct->timeout, expires, new_timeout); } } nf_ct_put(ct); } static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table, struct flow_offload *flow, void *data) { bool teardown = test_bit(NF_FLOW_TEARDOWN, &flow->flags); if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) || nf_flow_custom_gc(flow_table, flow)) { flow_offload_teardown(flow); teardown = true; } else if (!teardown) { nf_flow_table_extend_ct_timeout(flow->ct); } if (teardown) { if (test_bit(NF_FLOW_HW, &flow->flags)) { if (!test_bit(NF_FLOW_HW_DYING, &flow->flags)) nf_flow_offload_del(flow_table, flow); else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags)) flow_offload_del(flow_table, flow); } else { flow_offload_del(flow_table, flow); } } else if (test_bit(NF_FLOW_CLOSING, &flow->flags) && test_bit(NF_FLOW_HW, &flow->flags) && !test_bit(NF_FLOW_HW_DYING, &flow->flags)) { nf_flow_offload_del(flow_table, flow); } else if (test_bit(NF_FLOW_HW, &flow->flags)) { nf_flow_offload_stats(flow_table, flow); } } void nf_flow_table_gc_run(struct nf_flowtable *flow_table) { nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, NULL); } static void nf_flow_offload_work_gc(struct work_struct *work) { struct nf_flowtable *flow_table; flow_table = container_of(work, struct nf_flowtable, gc_work.work); nf_flow_table_gc_run(flow_table); queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ); } static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff, __be16 port, __be16 new_port) { struct tcphdr *tcph; tcph = (void *)(skb_network_header(skb) + thoff); inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false); } static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff, __be16 port, __be16 new_port) { struct udphdr *udph; udph = (void *)(skb_network_header(skb) + thoff); if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { inet_proto_csum_replace2(&udph->check, skb, port, new_port, false); if (!udph->check) udph->check = CSUM_MANGLED_0; } } static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff, u8 protocol, __be16 port, __be16 new_port) { switch (protocol) { case IPPROTO_TCP: nf_flow_nat_port_tcp(skb, thoff, port, new_port); break; case IPPROTO_UDP: nf_flow_nat_port_udp(skb, thoff, port, new_port); break; } } void nf_flow_snat_port(const struct flow_offload *flow, struct sk_buff *skb, unsigned int thoff, u8 protocol, enum flow_offload_tuple_dir dir) { struct flow_ports *hdr; __be16 port, new_port; hdr = (void *)(skb_network_header(skb) + thoff); switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: port = hdr->source; new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port; hdr->source = new_port; break; case 
FLOW_OFFLOAD_DIR_REPLY: port = hdr->dest; new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port; hdr->dest = new_port; break; } nf_flow_nat_port(skb, thoff, protocol, port, new_port); } EXPORT_SYMBOL_GPL(nf_flow_snat_port); void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb, unsigned int thoff, u8 protocol, enum flow_offload_tuple_dir dir) { struct flow_ports *hdr; __be16 port, new_port; hdr = (void *)(skb_network_header(skb) + thoff); switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: port = hdr->dest; new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port; hdr->dest = new_port; break; case FLOW_OFFLOAD_DIR_REPLY: port = hdr->source; new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port; hdr->source = new_port; break; } nf_flow_nat_port(skb, thoff, protocol, port, new_port); } EXPORT_SYMBOL_GPL(nf_flow_dnat_port); int nf_flow_table_init(struct nf_flowtable *flowtable) { int err; INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc); flow_block_init(&flowtable->flow_block); init_rwsem(&flowtable->flow_block_lock); err = rhashtable_init(&flowtable->rhashtable, &nf_flow_offload_rhash_params); if (err < 0) return err; queue_delayed_work(system_power_efficient_wq, &flowtable->gc_work, HZ); mutex_lock(&flowtable_lock); list_add(&flowtable->list, &flowtables); mutex_unlock(&flowtable_lock); return 0; } EXPORT_SYMBOL_GPL(nf_flow_table_init); static void nf_flow_table_do_cleanup(struct nf_flowtable *flow_table, struct flow_offload *flow, void *data) { struct net_device *dev = data; if (!dev) { flow_offload_teardown(flow); return; } if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) && (flow->tuplehash[0].tuple.iifidx == dev->ifindex || flow->tuplehash[1].tuple.iifidx == dev->ifindex)) flow_offload_teardown(flow); } void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable, struct net_device *dev) { nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev); flush_delayed_work(&flowtable->gc_work); nf_flow_table_offload_flush(flowtable); } void nf_flow_table_cleanup(struct net_device *dev) { struct nf_flowtable *flowtable; mutex_lock(&flowtable_lock); list_for_each_entry(flowtable, &flowtables, list) nf_flow_table_gc_cleanup(flowtable, dev); mutex_unlock(&flowtable_lock); } EXPORT_SYMBOL_GPL(nf_flow_table_cleanup); void nf_flow_table_free(struct nf_flowtable *flow_table) { mutex_lock(&flowtable_lock); list_del(&flow_table->list); mutex_unlock(&flowtable_lock); cancel_delayed_work_sync(&flow_table->gc_work); nf_flow_table_offload_flush(flow_table); /* ... no more pending work after this stage ... */ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL); nf_flow_table_gc_run(flow_table); nf_flow_table_offload_flush_cleanup(flow_table); rhashtable_destroy(&flow_table->rhashtable); } EXPORT_SYMBOL_GPL(nf_flow_table_free); static int nf_flow_table_init_net(struct net *net) { net->ft.stat = alloc_percpu(struct nf_flow_table_stat); return net->ft.stat ? 
0 : -ENOMEM; } static void nf_flow_table_fini_net(struct net *net) { free_percpu(net->ft.stat); } static int nf_flow_table_pernet_init(struct net *net) { int ret; ret = nf_flow_table_init_net(net); if (ret < 0) return ret; ret = nf_flow_table_init_proc(net); if (ret < 0) goto out_proc; return 0; out_proc: nf_flow_table_fini_net(net); return ret; } static void nf_flow_table_pernet_exit(struct list_head *net_exit_list) { struct net *net; list_for_each_entry(net, net_exit_list, exit_list) { nf_flow_table_fini_proc(net); nf_flow_table_fini_net(net); } } static struct pernet_operations nf_flow_table_net_ops = { .init = nf_flow_table_pernet_init, .exit_batch = nf_flow_table_pernet_exit, }; static int __init nf_flow_table_module_init(void) { int ret; ret = register_pernet_subsys(&nf_flow_table_net_ops); if (ret < 0) return ret; ret = nf_flow_table_offload_init(); if (ret) goto out_offload; ret = nf_flow_register_bpf(); if (ret) goto out_bpf; return 0; out_bpf: nf_flow_table_offload_exit(); out_offload: unregister_pernet_subsys(&nf_flow_table_net_ops); return ret; } static void __exit nf_flow_table_module_exit(void) { nf_flow_table_offload_exit(); unregister_pernet_subsys(&nf_flow_table_net_ops); } module_init(nf_flow_table_module_init); module_exit(nf_flow_table_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_DESCRIPTION("Netfilter flow table module");
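/*
 * Illustrative sketch (not kernel code): when a torn-down flow is handed
 * back to conntrack, flow_offload_fixup_ct() above reduces the protocol
 * timeout by the offload timeout if the flow has already expired, clamps
 * the result at zero, and only refreshes ct->timeout when that would
 * shorten the remaining lifetime (or when the flow is closing).  The
 * plain-int helper below reproduces just that arithmetic; its name and
 * the values in main() are made up for the example.
 */
#include <stdio.h>
#include <stdbool.h>

static int fixup_timeout(int proto_timeout, int offload_timeout,
			 int remaining, bool expired, bool closing)
{
	int timeout = proto_timeout;

	if (expired)
		timeout -= offload_timeout;
	if (timeout < 0)
		timeout = 0;

	/* only ever shrink the remaining lifetime, never extend it */
	if (closing || remaining > timeout)
		return timeout;
	return remaining;
}

int main(void)
{
	/* expired UDP-style flow: 120s protocol timeout, 30s offload
	 * timeout, 3600s still on the conntrack clock -> cut back to 90s
	 */
	printf("new timeout: %ds\n", fixup_timeout(120, 30, 3600, true, false));
	return 0;
}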
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SEQ_BUF_H #define _LINUX_SEQ_BUF_H #include <linux/bug.h> #include <linux/minmax.h> #include <linux/seq_file.h> #include <linux/types.h> /* * Trace sequences are used to allow a function to call several other functions * to create a string of data to use. */ /** * struct seq_buf - seq buffer structure * @buffer: pointer to the buffer * @size: size of the buffer * @len: the amount of data inside the buffer */ struct seq_buf { char *buffer; size_t size; size_t len; }; #define DECLARE_SEQ_BUF(NAME, SIZE) \ struct seq_buf NAME = { \ .buffer = (char[SIZE]) { 0 }, \ .size = SIZE, \ } static inline void seq_buf_clear(struct seq_buf *s) { s->len = 0; if (s->size) s->buffer[0] = '\0'; } static inline void seq_buf_init(struct seq_buf *s, char *buf, unsigned int size) { s->buffer = buf; s->size = size; seq_buf_clear(s); } /* * A seq_buf has a buffer that might overflow. When this happens, * len is set to be greater than size. */ static inline bool seq_buf_has_overflowed(struct seq_buf *s) { return s->len > s->size; } static inline void seq_buf_set_overflow(struct seq_buf *s) { s->len = s->size + 1; } /* * How much buffer is left on the seq_buf? */ static inline unsigned int seq_buf_buffer_left(struct seq_buf *s) { if (seq_buf_has_overflowed(s)) return 0; return s->size - s->len; } /* How much buffer was written? */ static inline unsigned int seq_buf_used(struct seq_buf *s) { return min(s->len, s->size); } /** * seq_buf_str - get NUL-terminated C string from seq_buf * @s: the seq_buf handle * * This makes sure that the buffer in @s is NUL-terminated and * safe to read as a string. * * Note, if this is called when the buffer has overflowed, then * the last byte of the buffer is zeroed, and the len will still * point past it. * * After this function is called, s->buffer is safe to use * in string operations. * * Returns: @s->buffer after making sure it is terminated. */ static inline const char *seq_buf_str(struct seq_buf *s) { if (WARN_ON(s->size == 0)) return ""; if (seq_buf_buffer_left(s)) s->buffer[s->len] = 0; else s->buffer[s->size - 1] = 0; return s->buffer; } /** * seq_buf_get_buf - get buffer to write arbitrary data to * @s: the seq_buf handle * @bufp: the beginning of the buffer is stored here * * Returns: the number of bytes available in the buffer, or zero if * there's no space. */ static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) { WARN_ON(s->len > s->size + 1); if (s->len < s->size) { *bufp = s->buffer + s->len; return s->size - s->len; } *bufp = NULL; return 0; } /** * seq_buf_commit - commit data to the buffer * @s: the seq_buf handle * @num: the number of bytes to commit * * Commit @num bytes of data written to a buffer previously acquired * by seq_buf_get_buf(). To signal an error condition, or that the data * didn't fit in the available space, pass a negative @num value.
*/ static inline void seq_buf_commit(struct seq_buf *s, int num) { if (num < 0) { seq_buf_set_overflow(s); } else { /* num must be negative on overflow */ BUG_ON(s->len + num > s->size); s->len += num; } } extern __printf(2, 3) int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); extern __printf(2, 0) int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt); extern int seq_buf_puts(struct seq_buf *s, const char *str); extern int seq_buf_putc(struct seq_buf *s, unsigned char c); extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, unsigned int len); extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii); #ifdef CONFIG_BINARY_PRINTF __printf(2, 0) int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); #endif void seq_buf_do_printk(struct seq_buf *s, const char *lvl); #endif /* _LINUX_SEQ_BUF_H */
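Since the header above only declares the API, here is a hedged usage sketch (not taken from the listed sources): a caller builds a small on-stack seq_buf with DECLARE_SEQ_BUF(), appends formatted text, and reads it back as a NUL-terminated string. The example_report() name and the printed format are assumptions for illustration.

#include <linux/printk.h>
#include <linux/seq_buf.h>

static void example_report(int cpu, unsigned long count)
{
	/* 64-byte buffer declared and zero-initialized on the stack. */
	DECLARE_SEQ_BUF(sb, 64);

	seq_buf_printf(&sb, "cpu%d: %lu events", cpu, count);

	/* len > size signals that the formatted text did not fit. */
	if (seq_buf_has_overflowed(&sb))
		pr_warn("example report truncated\n");

	/* seq_buf_str() guarantees NUL termination before the string is used. */
	pr_info("%s\n", seq_buf_str(&sb));
}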
// SPDX-License-Identifier: GPL-2.0-or-later /* * TUN - Universal TUN/TAP device driver. * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> * * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ */ /* * Changes: * * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 * Add TUNSETLINK ioctl to set the link encapsulation * * Mark Smith <markzzzsmith@yahoo.com.au> * Use eth_random_addr() for tap MAC address. * * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 * Fixes in packet dropping, queue length setting and queue wakeup. * Increased default tx queue length. * Added ethtool API. * Minor cleanups * * Daniel Podlejski <underley@underley.eu.org> * Modifications for 2.3.99-pre5 kernel.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "tun" #define DRV_VERSION "1.6" #define DRV_DESCRIPTION "Universal TUN/TAP device driver" #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/miscdevice.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/compat.h> #include <linux/if.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/if_tun.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/math.h> #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/sock.h> #include <net/xdp.h> #include <net/ip_tunnels.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/skb_array.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/mutex.h> #include <linux/ieee802154.h> #include <uapi/linux/if_ltalk.h> #include <uapi/linux/if_fddi.h> #include <uapi/linux/if_hippi.h> #include <uapi/linux/if_fc.h> #include <net/ax25.h> #include <net/rose.h> #include <net/6lowpan.h> #include <net/rps.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> #include "tun_vnet.h" static void tun_default_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd); #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) /* TUN device flags */ /* IFF_ATTACH_QUEUE is never stored in device flags, * overload it to mean fasync when stored there. */ #define TUN_FASYNC IFF_ATTACH_QUEUE #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS) #define GOODCOPY_LEN 128 #define FLT_EXACT_COUNT 8 struct tap_filter { unsigned int count; /* Number of addrs. Zero means disabled */ u32 mask[2]; /* Mask of the hashed addrs */ unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; }; /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal * to max number of VCPUs in guest. */ #define MAX_TAP_QUEUES 256 #define MAX_TAP_FLOWS 4096 #define TUN_FLOW_EXPIRE (3 * HZ) /* A tun_file connects an open character device to a tuntap netdevice. It * also contains all socket related structures (except sock_fprog and tap_filter) * to serve as one transmit queue for tuntap device. The sock_fprog and * tap_filter were kept in tun_struct since they were used for filtering for the * netdevice not for a specific queue (at least I didn't see the requirement for * this). * * RCU usage: * The tun_file and tun_struct are loosely coupled, the pointer from one to the * other can only be read while rcu_read_lock or rtnl_lock is held. 
*/ struct tun_file { struct sock sk; struct socket socket; struct tun_struct __rcu *tun; struct fasync_struct *fasync; /* only used for fasnyc */ unsigned int flags; union { u16 queue_index; unsigned int ifindex; }; struct napi_struct napi; bool napi_enabled; bool napi_frags_enabled; struct mutex napi_mutex; /* Protects access to the above napi */ struct list_head next; struct tun_struct *detached; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; }; struct tun_page { struct page *page; int count; }; struct tun_flow_entry { struct hlist_node hash_link; struct rcu_head rcu; struct tun_struct *tun; u32 rxhash; u32 rps_rxhash; int queue_index; unsigned long updated ____cacheline_aligned_in_smp; }; #define TUN_NUM_FLOW_ENTRIES 1024 #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1) struct tun_prog { struct rcu_head rcu; struct bpf_prog *prog; }; /* Since the socket were moved to tun_file, to preserve the behavior of persist * device, socket filter, sndbuf and vnet header size were restore when the * file were attached to a persist device. */ struct tun_struct { struct tun_file __rcu *tfiles[MAX_TAP_QUEUES]; unsigned int numqueues; unsigned int flags; kuid_t owner; kgid_t group; struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4) int align; int vnet_hdr_sz; int sndbuf; struct tap_filter txflt; struct sock_fprog fprog; /* protected by rtnl lock */ bool filter_attached; u32 msg_enable; spinlock_t lock; struct hlist_head flows[TUN_NUM_FLOW_ENTRIES]; struct timer_list flow_gc_timer; unsigned long ageing_time; unsigned int numdisabled; struct list_head disabled; void *security; u32 flow_count; u32 rx_batched; atomic_long_t rx_frame_errors; struct bpf_prog __rcu *xdp_prog; struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; struct ethtool_link_ksettings link_ksettings; /* init args */ struct file *file; struct ifreq *ifr; }; struct veth { __be16 h_vlan_proto; __be16 h_vlan_TCI; }; static void tun_flow_init(struct tun_struct *tun); static void tun_flow_uninit(struct tun_struct *tun); static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); struct sk_buff_head *queue = &tfile->sk.sk_write_queue; struct sk_buff_head process_queue; struct sk_buff *skb; int received = 0; __skb_queue_head_init(&process_queue); spin_lock(&queue->lock); skb_queue_splice_tail_init(queue, &process_queue); spin_unlock(&queue->lock); while (received < budget && (skb = __skb_dequeue(&process_queue))) { napi_gro_receive(napi, skb); ++received; } if (!skb_queue_empty(&process_queue)) { spin_lock(&queue->lock); skb_queue_splice(&process_queue, queue); spin_unlock(&queue->lock); } return received; } static int tun_napi_poll(struct napi_struct *napi, int budget) { unsigned int received; received = tun_napi_receive(napi, budget); if (received < budget) napi_complete_done(napi, received); return received; } static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, bool napi_en, bool napi_frags) { tfile->napi_enabled = napi_en; tfile->napi_frags_enabled = napi_en && napi_frags; if (napi_en) { netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll); napi_enable(&tfile->napi); } } static void tun_napi_enable(struct tun_file *tfile) { if (tfile->napi_enabled) napi_enable(&tfile->napi); } static void tun_napi_disable(struct tun_file *tfile) { if (tfile->napi_enabled) napi_disable(&tfile->napi); } static 
void tun_napi_del(struct tun_file *tfile) { if (tfile->napi_enabled) netif_napi_del(&tfile->napi); } static bool tun_napi_frags_enabled(const struct tun_file *tfile) { return tfile->napi_frags_enabled; } static inline u32 tun_hashfn(u32 rxhash) { return rxhash & TUN_MASK_FLOW_ENTRIES; } static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) { struct tun_flow_entry *e; hlist_for_each_entry_rcu(e, head, hash_link) { if (e->rxhash == rxhash) return e; } return NULL; } static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, struct hlist_head *head, u32 rxhash, u16 queue_index) { struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC); if (e) { netif_info(tun, tx_queued, tun->dev, "create flow: hash %u index %u\n", rxhash, queue_index); e->updated = jiffies; e->rxhash = rxhash; e->rps_rxhash = 0; e->queue_index = queue_index; e->tun = tun; hlist_add_head_rcu(&e->hash_link, head); ++tun->flow_count; } return e; } static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) { netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n", e->rxhash, e->queue_index); hlist_del_rcu(&e->hash_link); kfree_rcu(e, rcu); --tun->flow_count; } static void tun_flow_flush(struct tun_struct *tun) { int i; spin_lock_bh(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) tun_flow_delete(tun, e); } spin_unlock_bh(&tun->lock); } static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) { int i; spin_lock_bh(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { if (e->queue_index == queue_index) tun_flow_delete(tun, e); } } spin_unlock_bh(&tun->lock); } static void tun_flow_cleanup(struct timer_list *t) { struct tun_struct *tun = from_timer(tun, t, flow_gc_timer); unsigned long delay = tun->ageing_time; unsigned long next_timer = jiffies + delay; unsigned long count = 0; int i; spin_lock(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { unsigned long this_timer; this_timer = e->updated + delay; if (time_before_eq(this_timer, jiffies)) { tun_flow_delete(tun, e); continue; } count++; if (time_before(this_timer, next_timer)) next_timer = this_timer; } } if (count) mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); spin_unlock(&tun->lock); } static void tun_flow_update(struct tun_struct *tun, u32 rxhash, struct tun_file *tfile) { struct hlist_head *head; struct tun_flow_entry *e; unsigned long delay = tun->ageing_time; u16 queue_index = tfile->queue_index; head = &tun->flows[tun_hashfn(rxhash)]; rcu_read_lock(); e = tun_flow_find(head, rxhash); if (likely(e)) { /* TODO: keep queueing to old queue until it's empty? 
*/ if (READ_ONCE(e->queue_index) != queue_index) WRITE_ONCE(e->queue_index, queue_index); if (e->updated != jiffies) e->updated = jiffies; sock_rps_record_flow_hash(e->rps_rxhash); } else { spin_lock_bh(&tun->lock); if (!tun_flow_find(head, rxhash) && tun->flow_count < MAX_TAP_FLOWS) tun_flow_create(tun, head, rxhash, queue_index); if (!timer_pending(&tun->flow_gc_timer)) mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + delay)); spin_unlock_bh(&tun->lock); } rcu_read_unlock(); } /* Save the hash received in the stack receive path and update the * flow_hash table accordingly. */ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) { if (unlikely(e->rps_rxhash != hash)) e->rps_rxhash = hash; } /* We try to identify a flow through its rxhash. The reason that * we do not check rxq no. is because some cards(e.g 82599), chooses * the rxq based on the txq where the last packet of the flow comes. As * the userspace application move between processors, we may get a * different rxq no. here. */ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) { struct tun_flow_entry *e; u32 txq, numqueues; numqueues = READ_ONCE(tun->numqueues); txq = __skb_get_hash_symmetric(skb); e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); if (e) { tun_flow_save_rps_rxhash(e, txq); txq = e->queue_index; } else { txq = reciprocal_scale(txq, numqueues); } return txq; } static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) { struct tun_prog *prog; u32 numqueues; u16 ret = 0; numqueues = READ_ONCE(tun->numqueues); if (!numqueues) return 0; prog = rcu_dereference(tun->steering_prog); if (prog) ret = bpf_prog_run_clear_cb(prog->prog, skb); return ret % numqueues; } static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct tun_struct *tun = netdev_priv(dev); u16 ret; rcu_read_lock(); if (rcu_dereference(tun->steering_prog)) ret = tun_ebpf_select_queue(tun, skb); else ret = tun_automq_select_queue(tun, skb); rcu_read_unlock(); return ret; } static inline bool tun_not_capable(struct tun_struct *tun) { const struct cred *cred = current_cred(); struct net *net = dev_net(tun->dev); return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || (gid_valid(tun->group) && !in_egroup_p(tun->group))) && !ns_capable(net->user_ns, CAP_NET_ADMIN); } static void tun_set_real_num_queues(struct tun_struct *tun) { netif_set_real_num_tx_queues(tun->dev, tun->numqueues); netif_set_real_num_rx_queues(tun->dev, tun->numqueues); } static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile) { tfile->detached = tun; list_add_tail(&tfile->next, &tun->disabled); ++tun->numdisabled; } static struct tun_struct *tun_enable_queue(struct tun_file *tfile) { struct tun_struct *tun = tfile->detached; tfile->detached = NULL; list_del_init(&tfile->next); --tun->numdisabled; return tun; } void tun_ptr_free(void *ptr) { if (!ptr) return; if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); xdp_return_frame(xdpf); } else { __skb_array_destroy_skb(ptr); } } EXPORT_SYMBOL_GPL(tun_ptr_free); static void tun_queue_purge(struct tun_file *tfile) { void *ptr; while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL) tun_ptr_free(ptr); skb_queue_purge(&tfile->sk.sk_write_queue); skb_queue_purge(&tfile->sk.sk_error_queue); } static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; struct tun_struct *tun; tun = rtnl_dereference(tfile->tun); if (tun && clean) { 
if (!tfile->detached) tun_napi_disable(tfile); tun_napi_del(tfile); } if (tun && !tfile->detached) { u16 index = tfile->queue_index; BUG_ON(index >= tun->numqueues); rcu_assign_pointer(tun->tfiles[index], tun->tfiles[tun->numqueues - 1]); ntfile = rtnl_dereference(tun->tfiles[index]); ntfile->queue_index = index; ntfile->xdp_rxq.queue_index = index; rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], NULL); --tun->numqueues; if (clean) { RCU_INIT_POINTER(tfile->tun, NULL); sock_put(&tfile->sk); } else { tun_disable_queue(tun, tfile); tun_napi_disable(tfile); } synchronize_net(); tun_flow_delete_by_queue(tun, tun->numqueues + 1); /* Drop read queue */ tun_queue_purge(tfile); tun_set_real_num_queues(tun); } else if (tfile->detached && clean) { tun = tun_enable_queue(tfile); sock_put(&tfile->sk); } if (clean) { if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { netif_carrier_off(tun->dev); if (!(tun->flags & IFF_PERSIST) && tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } if (tun) xdp_rxq_info_unreg(&tfile->xdp_rxq); ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); } } static void tun_detach(struct tun_file *tfile, bool clean) { struct tun_struct *tun; struct net_device *dev; rtnl_lock(); tun = rtnl_dereference(tfile->tun); dev = tun ? tun->dev : NULL; __tun_detach(tfile, clean); if (dev) netdev_state_change(dev); rtnl_unlock(); if (clean) sock_put(&tfile->sk); } static void tun_detach_all(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile, *tmp; int i, n = tun->numqueues; for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); BUG_ON(!tfile); tun_napi_disable(tfile); tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); --tun->numqueues; } list_for_each_entry(tfile, &tun->disabled, next) { tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); } BUG_ON(tun->numqueues != 0); synchronize_net(); for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); tun_napi_del(tfile); /* Drop read queue */ tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_napi_del(tfile); tun_enable_queue(tfile); tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); } BUG_ON(tun->numdisabled != 0); if (tun->flags & IFF_PERSIST) module_put(THIS_MODULE); } static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter, bool napi, bool napi_frags, bool publish_tun) { struct tun_file *tfile = file->private_data; struct net_device *dev = tun->dev; int err; err = security_tun_dev_attach(tfile->socket.sk, tun->security); if (err < 0) goto out; err = -EINVAL; if (rtnl_dereference(tfile->tun) && !tfile->detached) goto out; err = -EBUSY; if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1) goto out; err = -E2BIG; if (!tfile->detached && tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES) goto out; err = 0; /* Re-attach the filter to persist device */ if (!skip_filter && (tun->filter_attached == true)) { lock_sock(tfile->socket.sk); err = sk_attach_filter(&tun->fprog, tfile->socket.sk); release_sock(tfile->socket.sk); if (!err) goto out; } if (!tfile->detached && ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL, tun_ptr_free)) { err = -ENOMEM; goto out; } tfile->queue_index = tun->numqueues; 
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; if (tfile->detached) { /* Re-attach detached tfile, updating XDP queue_index */ WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq)); if (tfile->xdp_rxq.queue_index != tfile->queue_index) tfile->xdp_rxq.queue_index = tfile->queue_index; } else { /* Setup XDP RX-queue info, for new tfile getting attached */ err = xdp_rxq_info_reg(&tfile->xdp_rxq, tun->dev, tfile->queue_index, 0); if (err < 0) goto out; err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (err < 0) { xdp_rxq_info_unreg(&tfile->xdp_rxq); goto out; } err = 0; } if (tfile->detached) { tun_enable_queue(tfile); tun_napi_enable(tfile); } else { sock_hold(&tfile->sk); tun_napi_init(tun, tfile, napi, napi_frags); } if (rtnl_dereference(tun->xdp_prog)) sock_set_flag(&tfile->sk, SOCK_XDP); /* device is allowed to go away first, so no need to hold extra * refcnt. */ /* Publish tfile->tun and tun->tfiles only after we've fully * initialized tfile; otherwise we risk using half-initialized * object. */ if (publish_tun) rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; tun_set_real_num_queues(tun); out: return err; } static struct tun_struct *tun_get(struct tun_file *tfile) { struct tun_struct *tun; rcu_read_lock(); tun = rcu_dereference(tfile->tun); if (tun) dev_hold(tun->dev); rcu_read_unlock(); return tun; } static void tun_put(struct tun_struct *tun) { dev_put(tun->dev); } /* TAP filtering */ static void addr_hash_set(u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; mask[n >> 5] |= (1 << (n & 31)); } static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; return mask[n >> 5] & (1 << (n & 31)); } static int update_filter(struct tap_filter *filter, void __user *arg) { struct { u8 u[ETH_ALEN]; } *addr; struct tun_filter uf; int err, alen, n, nexact; if (copy_from_user(&uf, arg, sizeof(uf))) return -EFAULT; if (!uf.count) { /* Disabled */ filter->count = 0; return 0; } alen = ETH_ALEN * uf.count; addr = memdup_user(arg + sizeof(uf), alen); if (IS_ERR(addr)) return PTR_ERR(addr); /* The filter is updated without holding any locks. Which is * perfectly safe. We disable it first and in the worst * case we'll accept a few undesired packets. */ filter->count = 0; wmb(); /* Use first set of addresses as an exact filter */ for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) memcpy(filter->addr[n], addr[n].u, ETH_ALEN); nexact = n; /* Remaining multicast addresses are hashed, * unicast will leave the filter disabled. */ memset(filter->mask, 0, sizeof(filter->mask)); for (; n < uf.count; n++) { if (!is_multicast_ether_addr(addr[n].u)) { err = 0; /* no filter */ goto free_addr; } addr_hash_set(filter->mask, addr[n].u); } /* For ALLMULTI just set the mask to all ones. * This overrides the mask populated above. */ if ((uf.flags & TUN_FLT_ALLMULTI)) memset(filter->mask, ~0, sizeof(filter->mask)); /* Now enable the filter */ wmb(); filter->count = nexact; /* Return the number of exact filters */ err = nexact; free_addr: kfree(addr); return err; } /* Returns: 0 - drop, !=0 - accept */ static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) { /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect * at this point. 
*/ struct ethhdr *eh = (struct ethhdr *) skb->data; int i; /* Exact match */ for (i = 0; i < filter->count; i++) if (ether_addr_equal(eh->h_dest, filter->addr[i])) return 1; /* Inexact match (multicast only) */ if (is_multicast_ether_addr(eh->h_dest)) return addr_hash_test(filter->mask, eh->h_dest); return 0; } /* * Checks whether the packet is accepted or not. * Returns: 0 - drop, !=0 - accept */ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) { if (!filter->count) return 1; return run_filter(filter, skb); } /* Network device part of the driver */ static const struct ethtool_ops tun_ethtool_ops; static int tun_net_init(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); struct ifreq *ifr = tun->ifr; int err; spin_lock_init(&tun->lock); err = security_tun_dev_alloc_security(&tun->security); if (err < 0) return err; tun_flow_init(tun); dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->features = dev->hw_features; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); dev->lltx = true; tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, ifr->ifr_flags & IFF_NAPI_FRAGS, false); if (err < 0) { tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); return err; } return 0; } /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { tun_detach_all(dev); } /* Net device open. */ static int tun_net_open(struct net_device *dev) { netif_tx_start_all_queues(dev); return 0; } /* Net device close. */ static int tun_net_close(struct net_device *dev) { netif_tx_stop_all_queues(dev); return 0; } /* Net device start xmit */ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) { #ifdef CONFIG_RPS if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) { /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. */ struct tun_flow_entry *e; __u32 rxhash; rxhash = __skb_get_hash_symmetric(skb); e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); if (e) tun_flow_save_rps_rxhash(e, rxhash); } #endif } static unsigned int run_ebpf_filter(struct tun_struct *tun, struct sk_buff *skb, int len) { struct tun_prog *prog = rcu_dereference(tun->filter_prog); if (prog) len = bpf_prog_run_clear_cb(prog->prog, skb); return len; } /* Net device start xmit */ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); enum skb_drop_reason drop_reason; int txq = skb->queue_mapping; struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; rcu_read_lock(); tfile = rcu_dereference(tun->tfiles[txq]); /* Drop packet if interface is not attached */ if (!tfile) { drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } if (!rcu_dereference(tun->steering_prog)) tun_automq_xmit(tun, skb); netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len); /* Drop if the filter does not like it. * This is a noop if the filter is disabled. * Filter can be enabled only for the TAP devices. 
*/ if (!check_filter(&tun->txflt, skb)) { drop_reason = SKB_DROP_REASON_TAP_TXFILTER; goto drop; } if (tfile->socket.sk->sk_filter && sk_filter(tfile->socket.sk, skb)) { drop_reason = SKB_DROP_REASON_SOCKET_FILTER; goto drop; } len = run_ebpf_filter(tun, skb, len); if (len == 0) { drop_reason = SKB_DROP_REASON_TAP_FILTER; goto drop; } if (pskb_trim(skb, len)) { drop_reason = SKB_DROP_REASON_NOMEM; goto drop; } if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; } skb_tx_timestamp(skb); /* Orphan the skb - required as we might hang on to it * for indefinite time. */ skb_orphan(skb); nf_reset_ct(skb); if (ptr_ring_produce(&tfile->tx_ring, skb)) { drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; } /* dev->lltx requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); txq_trans_cond_update(queue); /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); tfile->socket.sk->sk_data_ready(tfile->socket.sk); rcu_read_unlock(); return NETDEV_TX_OK; drop: dev_core_stats_tx_dropped_inc(dev); skb_tx_error(skb); kfree_skb_reason(skb, drop_reason); rcu_read_unlock(); return NET_XMIT_DROP; } static void tun_net_mclist(struct net_device *dev) { /* * This callback is supposed to deal with mc filter in * _rx_ path and has nothing to do with the _tx_ path. * In rx path we always accept everything userspace gives us. */ } static netdev_features_t tun_net_fix_features(struct net_device *dev, netdev_features_t features) { struct tun_struct *tun = netdev_priv(dev); return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); } static void tun_set_headroom(struct net_device *dev, int new_hr) { struct tun_struct *tun = netdev_priv(dev); if (new_hr < NET_SKB_PAD) new_hr = NET_SKB_PAD; tun->align = new_hr; } static void tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct tun_struct *tun = netdev_priv(dev); dev_get_tstats64(dev, stats); stats->rx_frame_errors += (unsigned long)atomic_long_read(&tun->rx_frame_errors); } static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; struct bpf_prog *old_prog; int i; old_prog = rtnl_dereference(tun->xdp_prog); rcu_assign_pointer(tun->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); if (prog) sock_set_flag(&tfile->sk, SOCK_XDP); else sock_reset_flag(&tfile->sk, SOCK_XDP); } list_for_each_entry(tfile, &tun->disabled, next) { if (prog) sock_set_flag(&tfile->sk, SOCK_XDP); else sock_reset_flag(&tfile->sk, SOCK_XDP); } return 0; } static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return tun_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) { if (new_carrier) { struct tun_struct *tun = netdev_priv(dev); if (!tun->numqueues) return -EPERM; netif_carrier_on(dev); } else { netif_carrier_off(dev); } return 0; } static const struct net_device_ops tun_netdev_ops = { .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_select_queue = tun_select_queue, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = 
tun_net_get_stats64, .ndo_change_carrier = tun_net_change_carrier, }; static void __tun_xdp_flush_tfile(struct tun_file *tfile) { /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); tfile->socket.sk->sk_data_ready(tfile->socket.sk); } static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; u32 numqueues; int nxmit = 0; int i; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; rcu_read_lock(); resample: numqueues = READ_ONCE(tun->numqueues); if (!numqueues) { rcu_read_unlock(); return -ENXIO; /* Caller will free/return all frames */ } tfile = rcu_dereference(tun->tfiles[smp_processor_id() % numqueues]); if (unlikely(!tfile)) goto resample; spin_lock(&tfile->tx_ring.producer_lock); for (i = 0; i < n; i++) { struct xdp_frame *xdp = frames[i]; /* Encode the XDP flag into lowest bit for consumer to differ * XDP buffer from sk_buff. */ void *frame = tun_xdp_to_ptr(xdp); if (__ptr_ring_produce(&tfile->tx_ring, frame)) { dev_core_stats_tx_dropped_inc(dev); break; } nxmit++; } spin_unlock(&tfile->tx_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) __tun_xdp_flush_tfile(tfile); rcu_read_unlock(); return nxmit; } static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) { struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); int nxmit; if (unlikely(!frame)) return -EOVERFLOW; nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); if (!nxmit) xdp_return_frame_rx_napi(frame); return nxmit; } static const struct net_device_ops tap_netdev_ops = { .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_set_rx_mode = tun_net_mclist, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_select_queue = tun_select_queue, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_bpf = tun_xdp, .ndo_xdp_xmit = tun_xdp_xmit, .ndo_change_carrier = tun_net_change_carrier, }; static void tun_flow_init(struct tun_struct *tun) { int i; for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) INIT_HLIST_HEAD(&tun->flows[i]); tun->ageing_time = TUN_FLOW_EXPIRE; timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + tun->ageing_time)); } static void tun_flow_uninit(struct tun_struct *tun) { timer_delete_sync(&tun->flow_gc_timer); tun_flow_flush(tun); } #define MIN_MTU 68 #define MAX_MTU 65535 /* Initialize net device. */ static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: dev->netdev_ops = &tun_netdev_ops; dev->header_ops = &ip_tunnel_header_ops; /* Point-to-Point TUN Device */ dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = 1500; /* Zero header length */ dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; break; case IFF_TAP: dev->netdev_ops = &tap_netdev_ops; /* Ethernet TAP Device */ ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; eth_hw_addr_random(dev); /* Currently tun does not support XDP, only tap does. 
*/ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; break; } dev->min_mtu = MIN_MTU; dev->max_mtu = MAX_MTU - dev->hard_header_len; } static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) { struct sock *sk = tfile->socket.sk; return (tun->dev->flags & IFF_UP) && sock_writeable(sk); } /* Character device part */ /* Poll */ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) { struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); struct sock *sk; __poll_t mask = 0; if (!tun) return EPOLLERR; sk = tfile->socket.sk; poll_wait(file, sk_sleep(sk), wait); if (!ptr_ring_empty(&tfile->tx_ring)) mask |= EPOLLIN | EPOLLRDNORM; /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to * guarantee EPOLLOUT to be raised by either here or * tun_sock_write_space(). Then process could get notification * after it writes to a down device and meets -EIO. */ if (tun_sock_writeable(tun, tfile) || (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && tun_sock_writeable(tun, tfile))) mask |= EPOLLOUT | EPOLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) mask = EPOLLERR; tun_put(tun); return mask; } static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, size_t len, const struct iov_iter *it) { struct sk_buff *skb; size_t linear; int err; int i; if (it->nr_segs > MAX_SKB_FRAGS + 1 || len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) return ERR_PTR(-EMSGSIZE); local_bh_disable(); skb = napi_get_frags(&tfile->napi); local_bh_enable(); if (!skb) return ERR_PTR(-ENOMEM); linear = iov_iter_single_seg_count(it); err = __skb_grow(skb, linear); if (err) goto free; skb->len = len; skb->data_len = len - linear; skb->truesize += skb->data_len; for (i = 1; i < it->nr_segs; i++) { const struct iovec *iov = iter_iov(it) + i; size_t fragsz = iov->iov_len; struct page *page; void *frag; if (fragsz == 0 || fragsz > PAGE_SIZE) { err = -EINVAL; goto free; } frag = netdev_alloc_frag(fragsz); if (!frag) { err = -ENOMEM; goto free; } page = virt_to_head_page(frag); skb_fill_page_desc(skb, i - 1, page, frag - page_address(page), fragsz); } return skb; free: /* frees skb and all frags allocated with napi_alloc_frag() */ napi_free_frags(&tfile->napi); return ERR_PTR(err); } /* prepad is the amount to reserve at front. len is length after that. * linear is a hint as to how much to copy (usually headers). */ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, size_t prepad, size_t len, size_t linear, int noblock) { struct sock *sk = tfile->socket.sk; struct sk_buff *skb; int err; /* Under a page? Don't bother with paged skb. 
*/ if (prepad + len < PAGE_SIZE) linear = len; if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, &err, PAGE_ALLOC_COSTLY_ORDER); if (!skb) return ERR_PTR(err); skb_reserve(skb, prepad); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, int more) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; struct sk_buff_head process_queue; u32 rx_batched = tun->rx_batched; bool rcv = false; if (!rx_batched || (!more && skb_queue_empty(queue))) { local_bh_disable(); skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); return; } spin_lock(&queue->lock); if (!more || skb_queue_len(queue) == rx_batched) { __skb_queue_head_init(&process_queue); skb_queue_splice_tail_init(queue, &process_queue); rcv = true; } else { __skb_queue_tail(queue, skb); } spin_unlock(&queue->lock); if (rcv) { struct sk_buff *nskb; local_bh_disable(); while ((nskb = __skb_dequeue(&process_queue))) { skb_record_rx_queue(nskb, tfile->queue_index); netif_receive_skb(nskb); } skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); } } static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, int len, int noblock, bool zerocopy) { if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) return false; if (tfile->socket.sk->sk_sndbuf != INT_MAX) return false; if (!noblock) return false; if (zerocopy) return false; if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; return true; } static struct sk_buff *__tun_build_skb(struct tun_file *tfile, struct page_frag *alloc_frag, char *buf, int buflen, int len, int pad, int metasize) { struct sk_buff *skb = build_skb(buf, buflen); if (!skb) return ERR_PTR(-ENOMEM); skb_reserve(skb, pad); skb_put(skb, len); if (metasize) skb_metadata_set(skb, metasize); skb_set_owner_w(skb, tfile->socket.sk); get_page(alloc_frag->page); alloc_frag->offset += buflen; return skb; } static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, struct xdp_buff *xdp, u32 act) { int err; switch (act) { case XDP_REDIRECT: err = xdp_do_redirect(tun->dev, xdp, xdp_prog); if (err) { dev_core_stats_rx_dropped_inc(tun->dev); return err; } dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); break; case XDP_TX: err = tun_xdp_tx(tun->dev, xdp); if (err < 0) { dev_core_stats_rx_dropped_inc(tun->dev); return err; } dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); break; case XDP_PASS: break; default: bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); fallthrough; case XDP_DROP: dev_core_stats_rx_dropped_inc(tun->dev); break; } return act; } static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *from, struct virtio_net_hdr *hdr, int len, int *skb_xdp) { struct page_frag *alloc_frag = &current->task_frag; struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); char *buf; size_t copied; int pad = TUN_RX_PAD; int metasize = 0; int err = 0; rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) pad += 
XDP_PACKET_HEADROOM; buflen += SKB_DATA_ALIGN(len + pad); rcu_read_unlock(); alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) return ERR_PTR(-ENOMEM); buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; copied = copy_page_from_iter(alloc_frag->page, alloc_frag->offset + pad, len, from); if (copied != len) return ERR_PTR(-EFAULT); /* There's a small window that XDP may be set after the check * of xdp_prog above, this should be rare and for simplicity * we do XDP on skb in case the headroom is not enough. */ if (hdr->gso_type || !xdp_prog) { *skb_xdp = 1; return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad, metasize); } *skb_xdp = 0; local_bh_disable(); rcu_read_lock(); bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { struct xdp_buff xdp; u32 act; xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq); xdp_prepare_buff(&xdp, buf, pad, len, true); act = bpf_prog_run_xdp(xdp_prog, &xdp); if (act == XDP_REDIRECT || act == XDP_TX) { get_page(alloc_frag->page); alloc_frag->offset += buflen; } err = tun_xdp_act(tun, xdp_prog, &xdp, act); if (err < 0) { if (act == XDP_REDIRECT || act == XDP_TX) put_page(alloc_frag->page); goto out; } if (err == XDP_REDIRECT) xdp_do_flush(); if (err != XDP_PASS) goto out; pad = xdp.data - xdp.data_hard_start; len = xdp.data_end - xdp.data; /* It is known that the xdp_buff was prepared with metadata * support, so the metasize will never be negative. */ metasize = xdp.data - xdp.data_meta; } bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock(); local_bh_enable(); return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad, metasize); out: bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock(); local_bh_enable(); return NULL; } /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; size_t total_len = iov_iter_count(from); size_t len = total_len, align = tun->align, linear; struct virtio_net_hdr gso = { 0 }; int good_linear; int copylen; int hdr_len = 0; bool zerocopy = false; int err; u32 rxhash = 0; int skb_xdp = 1; bool frags = tun_napi_frags_enabled(tfile); enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) return -EINVAL; len -= sizeof(pi); if (!copy_from_iter_full(&pi, sizeof(pi), from)) return -EFAULT; } if (tun->flags & IFF_VNET_HDR) { int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); hdr_len = tun_vnet_hdr_get(vnet_hdr_sz, tun->flags, from, &gso); if (hdr_len < 0) return hdr_len; len -= vnet_hdr_sz; } if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { align += NET_IP_ALIGN; if (unlikely(len < ETH_HLEN || (hdr_len && hdr_len < ETH_HLEN))) return -EINVAL; } good_linear = SKB_MAX_HEAD(align); if (msg_control) { struct iov_iter i = *from; /* There are 256 bytes to be copied in skb, so there is * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. */ copylen = min(hdr_len ? 
hdr_len : GOODCOPY_LEN, good_linear); linear = copylen; iov_iter_advance(&i, copylen); if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) zerocopy = true; } if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { /* For the packet that is not easy to be processed * (e.g gso or jumbo packet), we will do it at after * skb was created with generic XDP routine. */ skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); err = PTR_ERR_OR_ZERO(skb); if (err) goto drop; if (!skb) return total_len; } else { if (!zerocopy) { copylen = len; linear = min(hdr_len, good_linear); } if (frags) { mutex_lock(&tfile->napi_mutex); skb = tun_napi_alloc_frags(tfile, copylen, from); /* tun_napi_alloc_frags() enforces a layout for the skb. * If zerocopy is enabled, then this layout will be * overwritten by zerocopy_sg_from_iter(). */ zerocopy = false; } else { if (!linear) linear = min_t(size_t, good_linear, copylen); skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); } err = PTR_ERR_OR_ZERO(skb); if (err) goto drop; if (zerocopy) err = zerocopy_sg_from_iter(skb, from); else err = skb_copy_datagram_from_iter(skb, 0, from, len); if (err) { err = -EFAULT; drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; } } if (tun_vnet_hdr_to_skb(tun->flags, skb, &gso)) { atomic_long_inc(&tun->rx_frame_errors); err = -EINVAL; goto free_skb; } switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: if (tun->flags & IFF_NO_PI) { u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; switch (ip_version) { case 4: pi.proto = htons(ETH_P_IP); break; case 6: pi.proto = htons(ETH_P_IPV6); break; default: err = -EINVAL; goto drop; } } skb_reset_mac_header(skb); skb->protocol = pi.proto; skb->dev = tun->dev; break; case IFF_TAP: if (frags && !pskb_may_pull(skb, ETH_HLEN)) { err = -ENOMEM; drop_reason = SKB_DROP_REASON_HDR_TRUNC; goto drop; } skb->protocol = eth_type_trans(skb, tun->dev); break; } /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; uarg->ops->complete(NULL, uarg, false); } skb_reset_network_header(skb); skb_probe_transport_header(skb); skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { struct bpf_prog *xdp_prog; int ret; local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { ret = do_xdp_generic(xdp_prog, &skb); if (ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); goto unlock_frags; } } rcu_read_unlock(); local_bh_enable(); } /* Compute the costly rx hash only if needed for flow updates. * We may get a very small possibility of OOO during switching, not * worth to optimize. */ if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); rcu_read_lock(); if (unlikely(!(tun->dev->flags & IFF_UP))) { err = -EIO; rcu_read_unlock(); drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } if (frags) { u32 headlen; /* Exercise flow dissector code path. 
*/ skb_push(skb, ETH_HLEN); headlen = eth_get_headlen(tun->dev, skb->data, skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { WARN_ON_ONCE(1); err = -ENOMEM; dev_core_stats_rx_dropped_inc(tun->dev); napi_busy: napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); return err; } if (likely(napi_schedule_prep(&tfile->napi))) { local_bh_disable(); napi_gro_frags(&tfile->napi); napi_complete(&tfile->napi); local_bh_enable(); } else { err = -EBUSY; goto napi_busy; } mutex_unlock(&tfile->napi_mutex); } else if (tfile->napi_enabled) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; int queue_len; spin_lock_bh(&queue->lock); if (unlikely(tfile->detached)) { spin_unlock_bh(&queue->lock); rcu_read_unlock(); err = -EBUSY; goto free_skb; } __skb_queue_tail(queue, skb); queue_len = skb_queue_len(queue); spin_unlock(&queue->lock); if (!more || queue_len > NAPI_POLL_WEIGHT) napi_schedule(&tfile->napi); local_bh_enable(); } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { tun_rx_batched(tun, tfile, skb, more); } else { netif_rx(skb); } rcu_read_unlock(); preempt_disable(); dev_sw_netstats_rx_add(tun->dev, len); preempt_enable(); if (rxhash) tun_flow_update(tun, rxhash, tfile); return total_len; drop: if (err != -EAGAIN) dev_core_stats_rx_dropped_inc(tun->dev); free_skb: if (!IS_ERR_OR_NULL(skb)) kfree_skb_reason(skb, drop_reason); unlock_frags: if (frags) { tfile->napi.skb = NULL; mutex_unlock(&tfile->napi_mutex); } return err ?: total_len; } static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t result; int noblock = 0; if (!tun) return -EBADFD; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; result = tun_get_user(tun, tfile, NULL, from, noblock, false); tun_put(tun); return result; } static ssize_t tun_put_user_xdp(struct tun_struct *tun, struct tun_file *tfile, struct xdp_frame *xdp_frame, struct iov_iter *iter) { int vnet_hdr_sz = 0; size_t size = xdp_frame->len; ssize_t ret; if (tun->flags & IFF_VNET_HDR) { struct virtio_net_hdr gso = { 0 }; vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso); if (ret) return ret; } ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; preempt_disable(); dev_sw_netstats_tx_add(tun->dev, 1, ret); preempt_enable(); return ret; } /* Put packet to the user space buffer */ static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; ssize_t total; int vlan_offset = 0; int vlan_hlen = 0; int vnet_hdr_sz = 0; int ret; if (skb_vlan_tag_present(skb)) vlan_hlen = VLAN_HLEN; if (tun->flags & IFF_VNET_HDR) vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); total = skb->len + vlan_hlen + vnet_hdr_sz; if (!(tun->flags & IFF_NO_PI)) { if (iov_iter_count(iter) < sizeof(pi)) return -EINVAL; total += sizeof(pi); if (iov_iter_count(iter) < total) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) return -EFAULT; } if (vnet_hdr_sz) { struct virtio_net_hdr gso; ret = tun_vnet_hdr_from_skb(tun->flags, tun->dev, skb, &gso); if (ret) return ret; ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso); if (ret) return ret; } if (vlan_hlen) { int ret; struct veth veth; veth.h_vlan_proto = skb->vlan_proto; veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = 
offsetof(struct vlan_ethhdr, h_vlan_proto); ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); if (ret || !iov_iter_count(iter)) goto done; ret = copy_to_iter(&veth, sizeof(veth), iter); if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: /* caller is in process context, */ preempt_disable(); dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); preempt_enable(); return total; } static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) { DECLARE_WAITQUEUE(wait, current); void *ptr = NULL; int error = 0; ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) goto out; if (noblock) { error = -EAGAIN; goto out; } add_wait_queue(&tfile->socket.wq.wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) break; if (signal_pending(current)) { error = -ERESTARTSYS; break; } if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { error = -EFAULT; break; } schedule(); } __set_current_state(TASK_RUNNING); remove_wait_queue(&tfile->socket.wq.wait, &wait); out: *err = error; return ptr; } static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *to, int noblock, void *ptr) { ssize_t ret; int err; if (!iov_iter_count(to)) { tun_ptr_free(ptr); return 0; } if (!ptr) { /* Read frames from ring */ ptr = tun_ring_recv(tfile, noblock, &err); if (!ptr) return err; } if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); ret = tun_put_user_xdp(tun, tfile, xdpf, to); xdp_return_frame(xdpf); } else { struct sk_buff *skb = ptr; ret = tun_put_user(tun, tfile, skb, to); if (unlikely(ret < 0)) kfree_skb(skb); else consume_skb(skb); } return ret; } static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t len = iov_iter_count(to), ret; int noblock = 0; if (!tun) return -EBADFD; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; ret = tun_do_read(tun, tfile, to, noblock, NULL); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; tun_put(tun); return ret; } static void tun_prog_free(struct rcu_head *rcu) { struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); bpf_prog_destroy(prog->prog); kfree(prog); } static int __tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, struct bpf_prog *prog) { struct tun_prog *old, *new = NULL; if (prog) { new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) return -ENOMEM; new->prog = prog; } spin_lock_bh(&tun->lock); old = rcu_dereference_protected(*prog_p, lockdep_is_held(&tun->lock)); rcu_assign_pointer(*prog_p, new); spin_unlock_bh(&tun->lock); if (old) call_rcu(&old->rcu, tun_prog_free); return 0; } static void tun_free_netdev(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); BUG_ON(!(list_empty(&tun->disabled))); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); __tun_set_ebpf(tun, &tun->steering_prog, NULL); __tun_set_ebpf(tun, &tun->filter_prog, NULL); } static void tun_setup(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); tun->owner = INVALID_UID; tun->group = INVALID_GID; tun_default_link_ksettings(dev, &tun->link_ksettings); dev->ethtool_ops = &tun_ethtool_ops; dev->needs_free_netdev = true; dev->priv_destructor = tun_free_netdev; /* We prefer our own queue length */ dev->tx_queue_len = 
TUN_READQ_SIZE; } /* Trivial set of netlink ops to allow deleting tun or tap * device with netlink. */ static int tun_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "tun/tap creation via rtnetlink is not supported."); return -EOPNOTSUPP; } static size_t tun_get_size(const struct net_device *dev) { BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); return nla_total_size(sizeof(uid_t)) + /* OWNER */ nla_total_size(sizeof(gid_t)) + /* GROUP */ nla_total_size(sizeof(u8)) + /* TYPE */ nla_total_size(sizeof(u8)) + /* PI */ nla_total_size(sizeof(u8)) + /* VNET_HDR */ nla_total_size(sizeof(u8)) + /* PERSIST */ nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 0; } static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) goto nla_put_failure; if (uid_valid(tun->owner) && nla_put_u32(skb, IFLA_TUN_OWNER, from_kuid_munged(current_user_ns(), tun->owner))) goto nla_put_failure; if (gid_valid(tun->group) && nla_put_u32(skb, IFLA_TUN_GROUP, from_kgid_munged(current_user_ns(), tun->group))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, !!(tun->flags & IFF_MULTI_QUEUE))) goto nla_put_failure; if (tun->flags & IFF_MULTI_QUEUE) { if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, tun->numdisabled)) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops tun_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct tun_struct), .setup = tun_setup, .validate = tun_validate, .get_size = tun_get_size, .fill_info = tun_fill_info, }; static void tun_sock_write_space(struct sock *sk) { struct tun_file *tfile; wait_queue_head_t *wqueue; if (!sock_writeable(sk)) return; if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); if (wqueue && waitqueue_active(wqueue)) wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); tfile = container_of(sk, struct tun_file, sk); kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } static void tun_put_page(struct tun_page *tpage) { if (tpage->page) __page_frag_cache_drain(tpage->page, tpage->count); } static int tun_xdp_one(struct tun_struct *tun, struct tun_file *tfile, struct xdp_buff *xdp, int *flush, struct tun_page *tpage) { unsigned int datasize = xdp->data_end - xdp->data; struct tun_xdp_hdr *hdr = xdp->data_hard_start; struct virtio_net_hdr *gso = &hdr->gso; struct bpf_prog *xdp_prog; struct sk_buff *skb = NULL; struct sk_buff_head *queue; u32 rxhash = 0, act; int buflen = hdr->buflen; int metasize = 0; int ret = 0; bool skb_xdp = false; struct page *page; if (unlikely(datasize < ETH_HLEN)) return -EINVAL; xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { if (gso->gso_type) { skb_xdp = true; goto build; } xdp_init_buff(xdp, buflen, &tfile->xdp_rxq); act = bpf_prog_run_xdp(xdp_prog, xdp); ret = tun_xdp_act(tun, xdp_prog, xdp, act); if (ret < 0) { 
put_page(virt_to_head_page(xdp->data)); return ret; } switch (ret) { case XDP_REDIRECT: *flush = true; fallthrough; case XDP_TX: return 0; case XDP_PASS: break; default: page = virt_to_head_page(xdp->data); if (tpage->page == page) { ++tpage->count; } else { tun_put_page(tpage); tpage->page = page; tpage->count = 1; } return 0; } } build: skb = build_skb(xdp->data_hard_start, buflen); if (!skb) { ret = -ENOMEM; goto out; } skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); /* The externally provided xdp_buff may have no metadata support, which * is marked by xdp->data_meta being xdp->data + 1. This will lead to a * metasize of -1 and is the reason why the condition checks for > 0. */ metasize = xdp->data - xdp->data_meta; if (metasize > 0) skb_metadata_set(skb, metasize); if (tun_vnet_hdr_to_skb(tun->flags, skb, gso)) { atomic_long_inc(&tun->rx_frame_errors); kfree_skb(skb); ret = -EINVAL; goto out; } skb->protocol = eth_type_trans(skb, tun->dev); skb_reset_network_header(skb); skb_probe_transport_header(skb); skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { ret = do_xdp_generic(xdp_prog, &skb); if (ret != XDP_PASS) { ret = 0; goto out; } } if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); if (tfile->napi_enabled) { queue = &tfile->sk.sk_write_queue; spin_lock(&queue->lock); if (unlikely(tfile->detached)) { spin_unlock(&queue->lock); kfree_skb(skb); return -EBUSY; } __skb_queue_tail(queue, skb); spin_unlock(&queue->lock); ret = 1; } else { netif_receive_skb(skb); ret = 0; } /* No need to disable preemption here since this function is * always called with bh disabled */ dev_sw_netstats_rx_add(tun->dev, datasize); if (rxhash) tun_flow_update(tun, rxhash, tfile); out: return ret; } static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { int ret, i; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); struct tun_msg_ctl *ctl = m->msg_control; struct xdp_buff *xdp; if (!tun) return -EBADFD; if (m->msg_controllen == sizeof(struct tun_msg_ctl) && ctl && ctl->type == TUN_MSG_PTR) { struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct tun_page tpage; int n = ctl->num; int flush = 0, queued = 0; memset(&tpage, 0, sizeof(tpage)); local_bh_disable(); rcu_read_lock(); bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); for (i = 0; i < n; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage); if (ret > 0) queued += ret; } if (flush) xdp_do_flush(); if (tfile->napi_enabled && queued > 0) napi_schedule(&tfile->napi); bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock(); local_bh_enable(); tun_put_page(&tpage); ret = total_len; goto out; } ret = tun_get_user(tun, tfile, ctl ? 
ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); out: tun_put(tun); return ret; } static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); void *ptr = m->msg_control; int ret; if (!tun) { ret = -EBADFD; goto out_free; } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { ret = -EINVAL; goto out_put_tun; } if (flags & MSG_ERRQUEUE) { ret = sock_recv_errqueue(sock->sk, m, total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; } out: tun_put(tun); return ret; out_put_tun: tun_put(tun); out_free: tun_ptr_free(ptr); return ret; } static int tun_ptr_peek_len(void *ptr) { if (likely(ptr)) { if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); return xdpf->len; } return __skb_array_len_with_tag(ptr); } else { return 0; } } static int tun_peek_len(struct socket *sock) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun; int ret = 0; tun = tun_get(tfile); if (!tun) return 0; ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); tun_put(tun); return ret; } /* Ops structure to mimic raw sockets with tun */ static const struct proto_ops tun_socket_ops = { .peek_len = tun_peek_len, .sendmsg = tun_sendmsg, .recvmsg = tun_recvmsg, }; static struct proto tun_proto = { .name = "tun", .owner = THIS_MODULE, .obj_size = sizeof(struct tun_file), }; static int tun_flags(struct tun_struct *tun) { return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); } static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return sysfs_emit(buf, "0x%x\n", tun_flags(tun)); } static ssize_t owner_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return uid_valid(tun->owner)? sysfs_emit(buf, "%u\n", from_kuid_munged(current_user_ns(), tun->owner)) : sysfs_emit(buf, "-1\n"); } static ssize_t group_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return gid_valid(tun->group) ? 
sysfs_emit(buf, "%u\n", from_kgid_munged(current_user_ns(), tun->group)) : sysfs_emit(buf, "-1\n"); } static DEVICE_ATTR_RO(tun_flags); static DEVICE_ATTR_RO(owner); static DEVICE_ATTR_RO(group); static struct attribute *tun_dev_attrs[] = { &dev_attr_tun_flags.attr, &dev_attr_owner.attr, &dev_attr_group.attr, NULL }; static const struct attribute_group tun_attr_group = { .attrs = tun_dev_attrs }; static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) { struct tun_struct *tun; struct tun_file *tfile = file->private_data; struct net_device *dev; int err; if (tfile->detached) return -EINVAL; if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!(ifr->ifr_flags & IFF_NAPI) || (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) return -EINVAL; } dev = __dev_get_by_name(net, ifr->ifr_name); if (dev) { if (ifr->ifr_flags & IFF_TUN_EXCL) return -EBUSY; if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) tun = netdev_priv(dev); else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) tun = netdev_priv(dev); else return -EINVAL; if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != !!(tun->flags & IFF_MULTI_QUEUE)) return -EINVAL; if (tun_not_capable(tun)) return -EPERM; err = security_tun_dev_open(tun->security); if (err < 0) return err; err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, ifr->ifr_flags & IFF_NAPI, ifr->ifr_flags & IFF_NAPI_FRAGS, true); if (err < 0) return err; if (tun->flags & IFF_MULTI_QUEUE && (tun->numqueues + tun->numdisabled > 1)) { /* One or more queue has already been attached, no need * to initialize the device again. */ netdev_state_change(dev); return 0; } tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); netdev_state_change(dev); } else { char *name; unsigned long flags = 0; int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; err = security_tun_dev_create(); if (err < 0) return err; /* Set dev type */ if (ifr->ifr_flags & IFF_TUN) { /* TUN device */ flags |= IFF_TUN; name = "tun%d"; } else if (ifr->ifr_flags & IFF_TAP) { /* TAP device */ flags |= IFF_TAP; name = "tap%d"; } else return -EINVAL; if (*ifr->ifr_name) name = ifr->ifr_name; dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, NET_NAME_UNKNOWN, tun_setup, queues, queues); if (!dev) return -ENOMEM; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; dev->ifindex = tfile->ifindex; dev->sysfs_groups[0] = &tun_attr_group; tun = netdev_priv(dev); tun->dev = dev; tun->flags = flags; tun->txflt.count = 0; tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); tun->align = NET_SKB_PAD; tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); tun->ifr = ifr; tun->file = file; tun_net_initialize(dev); err = register_netdevice(tun->dev); if (err < 0) { free_netdev(dev); return err; } /* free_netdev() won't check refcnt, to avoid race * with dev_put() we need publish tun after registration. */ rcu_assign_pointer(tfile->tun, tun); } if (ifr->ifr_flags & IFF_NO_CARRIER) netif_carrier_off(tun->dev); else netif_carrier_on(tun->dev); /* Make sure persistent devices do not get stuck in * xoff state. 
*/ if (netif_running(tun->dev)) netif_tx_wake_all_queues(tun->dev); strcpy(ifr->ifr_name, tun->dev->name); return 0; } static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) { strcpy(ifr->ifr_name, tun->dev->name); ifr->ifr_flags = tun_flags(tun); } /* This is like a cut-down ethtool ops, except done via tun fd so no * privs required. */ static int set_offload(struct tun_struct *tun, unsigned long arg) { netdev_features_t features = 0; if (arg & TUN_F_CSUM) { features |= NETIF_F_HW_CSUM; arg &= ~TUN_F_CSUM; if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { if (arg & TUN_F_TSO_ECN) { features |= NETIF_F_TSO_ECN; arg &= ~TUN_F_TSO_ECN; } if (arg & TUN_F_TSO4) features |= NETIF_F_TSO; if (arg & TUN_F_TSO6) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } arg &= ~TUN_F_UFO; /* TODO: for now USO4 and USO6 should work simultaneously */ if (arg & TUN_F_USO4 && arg & TUN_F_USO6) { features |= NETIF_F_GSO_UDP_L4; arg &= ~(TUN_F_USO4 | TUN_F_USO6); } } /* This gives the user a way to test for new features in future by * trying to set them. */ if (arg) return -EINVAL; tun->set_features = features; tun->dev->wanted_features &= ~TUN_USER_FEATURES; tun->dev->wanted_features |= features; netdev_update_features(tun->dev); return 0; } static void tun_detach_filter(struct tun_struct *tun, int n) { int i; struct tun_file *tfile; for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); lock_sock(tfile->socket.sk); sk_detach_filter(tfile->socket.sk); release_sock(tfile->socket.sk); } tun->filter_attached = false; } static int tun_attach_filter(struct tun_struct *tun) { int i, ret = 0; struct tun_file *tfile; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); lock_sock(tfile->socket.sk); ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); release_sock(tfile->socket.sk); if (ret) { tun_detach_filter(tun, i); return ret; } } tun->filter_attached = true; return ret; } static void tun_set_sndbuf(struct tun_struct *tun) { struct tun_file *tfile; int i; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); tfile->socket.sk->sk_sndbuf = tun->sndbuf; } } static int tun_set_queue(struct file *file, struct ifreq *ifr) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; int ret = 0; rtnl_lock(); if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { tun = tfile->detached; if (!tun) { ret = -EINVAL; goto unlock; } ret = security_tun_dev_attach_queue(tun->security); if (ret < 0) goto unlock; ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, tun->flags & IFF_NAPI_FRAGS, true); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) ret = -EINVAL; else __tun_detach(tfile, false); } else ret = -EINVAL; if (ret >= 0) netdev_state_change(tun->dev); unlock: rtnl_unlock(); return ret; } static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, void __user *data) { struct bpf_prog *prog; int fd; if (copy_from_user(&fd, data, sizeof(fd))) return -EFAULT; if (fd == -1) { prog = NULL; } else { prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(prog)) return PTR_ERR(prog); } return __tun_set_ebpf(tun, prog_p, prog); } /* Return correct value for tun->dev->addr_len based on tun->dev->type. 
*/ static unsigned char tun_get_addr_len(unsigned short type) { switch (type) { case ARPHRD_IP6GRE: case ARPHRD_TUNNEL6: return sizeof(struct in6_addr); case ARPHRD_IPGRE: case ARPHRD_TUNNEL: case ARPHRD_SIT: return 4; case ARPHRD_ETHER: return ETH_ALEN; case ARPHRD_IEEE802154: case ARPHRD_IEEE802154_MONITOR: return IEEE802154_EXTENDED_ADDR_LEN; case ARPHRD_PHONET_PIPE: case ARPHRD_PPP: case ARPHRD_NONE: return 0; case ARPHRD_6LOWPAN: return EUI64_ADDR_LEN; case ARPHRD_FDDI: return FDDI_K_ALEN; case ARPHRD_HIPPI: return HIPPI_ALEN; case ARPHRD_IEEE802: return FC_ALEN; case ARPHRD_ROSE: return ROSE_ADDR_LEN; case ARPHRD_NETROM: return AX25_ADDR_LEN; case ARPHRD_LOCALTLK: return LTALK_ALEN; default: return 0; } } static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { struct tun_file *tfile = file->private_data; struct net *net = sock_net(&tfile->sk); struct tun_struct *tun; void __user* argp = (void __user*)arg; unsigned int carrier; struct ifreq ifr; kuid_t owner; kgid_t group; int ifindex; int sndbuf; int ret; bool do_notify = false; if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; } else { memset(&ifr, 0, sizeof(ifr)); } if (cmd == TUNGETFEATURES) { /* Currently this just means: "what IFF flags are valid?". * This is needed because we never checked for invalid flags on * TUNSETIFF. */ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER | TUN_FEATURES, (unsigned int __user*)argp); } else if (cmd == TUNSETQUEUE) { return tun_set_queue(file, &ifr); } else if (cmd == SIOCGSKNS) { if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; return open_related_ns(&net->ns, get_net_ns); } rtnl_lock(); tun = tun_get(tfile); if (cmd == TUNSETIFF) { ret = -EEXIST; if (tun) goto unlock; ifr.ifr_name[IFNAMSIZ-1] = '\0'; ret = tun_set_iff(net, file, &ifr); if (ret) goto unlock; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; goto unlock; } if (cmd == TUNSETIFINDEX) { ret = -EPERM; if (tun) goto unlock; ret = -EFAULT; if (copy_from_user(&ifindex, argp, sizeof(ifindex))) goto unlock; ret = -EINVAL; if (ifindex < 0) goto unlock; ret = 0; tfile->ifindex = ifindex; goto unlock; } ret = -EBADFD; if (!tun) goto unlock; netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); net = dev_net(tun->dev); ret = 0; switch (cmd) { case TUNGETIFF: tun_get_iff(tun, &ifr); if (tfile->detached) ifr.ifr_flags |= IFF_DETACH_QUEUE; if (!tfile->socket.sk->sk_filter) ifr.ifr_flags |= IFF_NOFILTER; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case TUNSETNOCSUM: /* Disable/Enable checksum */ /* [unimplemented] */ netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", arg ? "disabled" : "enabled"); break; case TUNSETPERSIST: /* Disable/Enable persist mode. Keep an extra reference to the * module to prevent the module being unprobed. */ if (arg && !(tun->flags & IFF_PERSIST)) { tun->flags |= IFF_PERSIST; __module_get(THIS_MODULE); do_notify = true; } if (!arg && (tun->flags & IFF_PERSIST)) { tun->flags &= ~IFF_PERSIST; module_put(THIS_MODULE); do_notify = true; } netif_info(tun, drv, tun->dev, "persist %s\n", arg ? 
"enabled" : "disabled"); break; case TUNSETOWNER: /* Set owner of the device */ owner = make_kuid(current_user_ns(), arg); if (!uid_valid(owner)) { ret = -EINVAL; break; } tun->owner = owner; do_notify = true; netif_info(tun, drv, tun->dev, "owner set to %u\n", from_kuid(&init_user_ns, tun->owner)); break; case TUNSETGROUP: /* Set group of the device */ group = make_kgid(current_user_ns(), arg); if (!gid_valid(group)) { ret = -EINVAL; break; } tun->group = group; do_notify = true; netif_info(tun, drv, tun->dev, "group set to %u\n", from_kgid(&init_user_ns, tun->group)); break; case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { netif_info(tun, drv, tun->dev, "Linktype set failed because interface is up\n"); ret = -EBUSY; } else { ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, tun->dev); ret = notifier_to_errno(ret); if (ret) { netif_info(tun, drv, tun->dev, "Refused to change device type\n"); break; } tun->dev->type = (int) arg; tun->dev->addr_len = tun_get_addr_len(tun->dev->type); netif_info(tun, drv, tun->dev, "linktype set to %d\n", tun->dev->type); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, tun->dev); } break; case TUNSETDEBUG: tun->msg_enable = (u32)arg; break; case TUNSETOFFLOAD: ret = set_offload(tun, arg); break; case TUNSETTXFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = update_filter(&tun->txflt, (void __user *)arg); break; case SIOCGIFHWADDR: /* Get hw address */ dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name); if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case SIOCSIFHWADDR: /* Set hw address */ ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL); break; case TUNGETSNDBUF: sndbuf = tfile->socket.sk->sk_sndbuf; if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) ret = -EFAULT; break; case TUNSETSNDBUF: if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { ret = -EFAULT; break; } if (sndbuf <= 0) { ret = -EINVAL; break; } tun->sndbuf = sndbuf; tun_set_sndbuf(tun); break; case TUNATTACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = -EFAULT; if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) break; ret = tun_attach_filter(tun); break; case TUNDETACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = 0; tun_detach_filter(tun, tun->numqueues); break; case TUNGETFILTER: ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = -EFAULT; if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) break; ret = 0; break; case TUNSETSTEERINGEBPF: ret = tun_set_ebpf(tun, &tun->steering_prog, argp); break; case TUNSETFILTEREBPF: ret = tun_set_ebpf(tun, &tun->filter_prog, argp); break; case TUNSETCARRIER: ret = -EFAULT; if (copy_from_user(&carrier, argp, sizeof(carrier))) goto unlock; ret = tun_net_change_carrier(tun->dev, (bool)carrier); break; case TUNGETDEVNETNS: ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto unlock; ret = open_related_ns(&net->ns, get_net_ns); break; default: ret = tun_vnet_ioctl(&tun->vnet_hdr_sz, &tun->flags, cmd, argp); break; } if (do_notify) netdev_state_change(tun->dev); unlock: rtnl_unlock(); if (tun) tun_put(tun); return ret; } static long tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); } #ifdef CONFIG_COMPAT static long tun_chr_compat_ioctl(struct 
file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case TUNSETIFF: case TUNGETIFF: case TUNSETTXFILTER: case TUNGETSNDBUF: case TUNSETSNDBUF: case SIOCGIFHWADDR: case SIOCSIFHWADDR: arg = (unsigned long)compat_ptr(arg); break; default: arg = (compat_ulong_t)arg; break; } /* * compat_ifreq is shorter than ifreq, so we must not access beyond * the end of that structure. All fields that are used in this * driver are compatible though, we don't need to convert the * contents. */ return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); } #endif /* CONFIG_COMPAT */ static int tun_chr_fasync(int fd, struct file *file, int on) { struct tun_file *tfile = file->private_data; int ret; if (on) { ret = file_f_owner_allocate(file); if (ret) goto out; } if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) goto out; if (on) { __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); tfile->flags |= TUN_FASYNC; } else tfile->flags &= ~TUN_FASYNC; ret = 0; out: return ret; } static int tun_chr_open(struct inode *inode, struct file * file) { struct net *net = current->nsproxy->net_ns; struct tun_file *tfile; tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto, 0); if (!tfile) return -ENOMEM; if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { sk_free(&tfile->sk); return -ENOMEM; } mutex_init(&tfile->napi_mutex); RCU_INIT_POINTER(tfile->tun, NULL); tfile->flags = 0; tfile->ifindex = 0; init_waitqueue_head(&tfile->socket.wq.wait); tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; file->private_data = tfile; INIT_LIST_HEAD(&tfile->next); sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); /* tun groks IOCB_NOWAIT just fine, mark it as such */ file->f_mode |= FMODE_NOWAIT; return 0; } static int tun_chr_close(struct inode *inode, struct file *file) { struct tun_file *tfile = file->private_data; tun_detach(tfile, true); return 0; } #ifdef CONFIG_PROC_FS static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; struct ifreq ifr; memset(&ifr, 0, sizeof(ifr)); rtnl_lock(); tun = tun_get(tfile); if (tun) tun_get_iff(tun, &ifr); rtnl_unlock(); if (tun) tun_put(tun); seq_printf(m, "iff:\t%s\n", ifr.ifr_name); } #endif static const struct file_operations tun_fops = { .owner = THIS_MODULE, .read_iter = tun_chr_read_iter, .write_iter = tun_chr_write_iter, .poll = tun_chr_poll, .unlocked_ioctl = tun_chr_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = tun_chr_compat_ioctl, #endif .open = tun_chr_open, .release = tun_chr_close, .fasync = tun_chr_fasync, #ifdef CONFIG_PROC_FS .show_fdinfo = tun_chr_show_fdinfo, #endif }; static struct miscdevice tun_miscdev = { .minor = TUN_MINOR, .name = "tun", .nodename = "net/tun", .fops = &tun_fops, }; /* ethtool interface */ static void tun_default_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); cmd->base.speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_DISABLE; } static int tun_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct tun_struct *tun = netdev_priv(dev); memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); return 0; } static int 
tun_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct tun_struct *tun = netdev_priv(dev); memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); return 0; } static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tun_struct *tun = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: strscpy(info->bus_info, "tun", sizeof(info->bus_info)); break; case IFF_TAP: strscpy(info->bus_info, "tap", sizeof(info->bus_info)); break; } } static u32 tun_get_msglevel(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); return tun->msg_enable; } static void tun_set_msglevel(struct net_device *dev, u32 value) { struct tun_struct *tun = netdev_priv(dev); tun->msg_enable = value; } static int tun_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); ec->rx_max_coalesced_frames = tun->rx_batched; return 0; } static int tun_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) tun->rx_batched = NAPI_POLL_WEIGHT; else tun->rx_batched = ec->rx_max_coalesced_frames; return 0; } static void tun_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct tun_struct *tun = netdev_priv(dev); channels->combined_count = tun->numqueues; channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1; } static const struct ethtool_ops tun_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES, .get_drvinfo = tun_get_drvinfo, .get_msglevel = tun_get_msglevel, .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, .get_channels = tun_get_channels, .get_ts_info = ethtool_op_get_ts_info, .get_coalesce = tun_get_coalesce, .set_coalesce = tun_set_coalesce, .get_link_ksettings = tun_get_link_ksettings, .set_link_ksettings = tun_set_link_ksettings, }; static int tun_queue_resize(struct tun_struct *tun) { struct net_device *dev = tun->dev; struct tun_file *tfile; struct ptr_ring **rings; int n = tun->numqueues + tun->numdisabled; int ret, i; rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); if (!rings) return -ENOMEM; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); rings[i] = &tfile->tx_ring; } list_for_each_entry(tfile, &tun->disabled, next) rings[i++] = &tfile->tx_ring; ret = ptr_ring_resize_multiple_bh(rings, n, dev->tx_queue_len, GFP_KERNEL, tun_ptr_free); kfree(rings); return ret; } static int tun_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct tun_struct *tun = netdev_priv(dev); int i; if (dev->rtnl_link_ops != &tun_link_ops) return NOTIFY_DONE; switch (event) { case NETDEV_CHANGE_TX_QUEUE_LEN: if (tun_queue_resize(tun)) return NOTIFY_BAD; break; case NETDEV_UP: for (i = 0; i < tun->numqueues; i++) { struct tun_file *tfile; tfile = rtnl_dereference(tun->tfiles[i]); tfile->socket.sk->sk_write_space(tfile->socket.sk); } break; default: break; } return NOTIFY_DONE; } static struct notifier_block tun_notifier_block __read_mostly = { .notifier_call = tun_device_event, }; static int __init tun_init(void) { 
int ret = 0; pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); ret = rtnl_link_register(&tun_link_ops); if (ret) { pr_err("Can't register link_ops\n"); goto err_linkops; } ret = misc_register(&tun_miscdev); if (ret) { pr_err("Can't register misc device %d\n", TUN_MINOR); goto err_misc; } ret = register_netdevice_notifier(&tun_notifier_block); if (ret) { pr_err("Can't register netdevice notifier\n"); goto err_notifier; } return 0; err_notifier: misc_deregister(&tun_miscdev); err_misc: rtnl_link_unregister(&tun_link_ops); err_linkops: return ret; } static void __exit tun_cleanup(void) { misc_deregister(&tun_miscdev); rtnl_link_unregister(&tun_link_ops); unregister_netdevice_notifier(&tun_notifier_block); } /* Get an underlying socket object from tun file. Returns error unless file is * attached to a device. The returned object works like a packet socket, it * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for * holding a reference to the file for as long as the socket is in use. */ struct socket *tun_get_socket(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->socket; } EXPORT_SYMBOL_GPL(tun_get_socket); struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->tx_ring; } EXPORT_SYMBOL_GPL(tun_get_tx_ring); module_init(tun_init); module_exit(tun_cleanup); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(TUN_MINOR); MODULE_ALIAS("devname:net/tun");
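/*
 * A minimal user-space sketch (not part of the driver source above) of the
 * character-device path shown here: open() lands in tun_chr_open(), the
 * TUNSETIFF ioctl() is handled by __tun_chr_ioctl() -> tun_set_iff(), and
 * read()/write() go through tun_chr_read_iter()/tun_chr_write_iter(). The
 * interface name "tun-demo" and the bare-bones error handling are
 * illustrative assumptions, not taken from the driver.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
	struct ifreq ifr;
	char buf[2048];
	ssize_t n;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	/* IFF_TUN | IFF_NO_PI: raw IP packets, no struct tun_pi prefix. */
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tun-demo", IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return 1;
	}

	/* Each read() returns one packet the kernel sent to the interface;
	 * each write() injects one packet via tun_get_user(). */
	n = read(fd, buf, sizeof(buf));
	printf("read %zd bytes from %s\n", n, ifr.ifr_name);

	close(fd);
	return 0;
}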
// SPDX-License-Identifier: GPL-2.0+
/*
 * IMA support for appraising module-style appended signatures.
 *
 * Copyright (C) 2019 IBM Corporation
 *
 * Author:
 * Thiago Jung Bauermann <bauerman@linux.ibm.com>
 */

#include <linux/types.h>
#include <linux/module_signature.h>
#include <keys/asymmetric-type.h>
#include <crypto/pkcs7.h>

#include "ima.h"

struct modsig {
	struct pkcs7_message *pkcs7_msg;

	enum hash_algo hash_algo;

	/* This digest will go in the 'd-modsig' field of the IMA template. */
	const u8 *digest;
	u32 digest_size;

	/*
	 * This is what will go to the measurement list if the template requires
	 * storing the signature.
	 */
	int raw_pkcs7_len;
	u8 raw_pkcs7[] __counted_by(raw_pkcs7_len);
};

/*
 * ima_read_modsig - Read modsig from buf.
 *
 * Return: 0 on success, error code otherwise.
 */
int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
		    struct modsig **modsig)
{
	const size_t marker_len = strlen(MODULE_SIG_STRING);
	const struct module_signature *sig;
	struct modsig *hdr;
	size_t sig_len;
	const void *p;
	int rc;

	if (buf_len <= marker_len + sizeof(*sig))
		return -ENOENT;

	p = buf + buf_len - marker_len;
	if (memcmp(p, MODULE_SIG_STRING, marker_len))
		return -ENOENT;

	buf_len -= marker_len;
	sig = (const struct module_signature *)(p - sizeof(*sig));

	rc = mod_check_sig(sig, buf_len, func_tokens[func]);
	if (rc)
		return rc;

	sig_len = be32_to_cpu(sig->sig_len);
	buf_len -= sig_len + sizeof(*sig);

	/* Allocate sig_len additional bytes to hold the raw PKCS#7 data. */
	hdr = kzalloc(struct_size(hdr, raw_pkcs7, sig_len), GFP_KERNEL);
	if (!hdr)
		return -ENOMEM;

	hdr->raw_pkcs7_len = sig_len;
	hdr->pkcs7_msg = pkcs7_parse_message(buf + buf_len, sig_len);
	if (IS_ERR(hdr->pkcs7_msg)) {
		rc = PTR_ERR(hdr->pkcs7_msg);
		kfree(hdr);
		return rc;
	}

	memcpy(hdr->raw_pkcs7, buf + buf_len, sig_len);

	/* We don't know the hash algorithm yet. */
	hdr->hash_algo = HASH_ALGO__LAST;

	*modsig = hdr;

	return 0;
}

/**
 * ima_collect_modsig - Calculate the file hash without the appended signature.
 * @modsig: parsed module signature
 * @buf: data to verify the signature on
 * @size: data size
 *
 * Since the modsig is part of the file contents, the hash used in its signature
 * isn't the same one ordinarily calculated by IMA. Therefore PKCS7 code
 * calculates a separate one for signature verification.
 */
void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size)
{
	int rc;

	/*
	 * Provide the file contents (minus the appended sig) so that the PKCS7
	 * code can calculate the file hash.
	 */
	size -= modsig->raw_pkcs7_len + strlen(MODULE_SIG_STRING) +
		sizeof(struct module_signature);
	rc = pkcs7_supply_detached_data(modsig->pkcs7_msg, buf, size);
	if (rc)
		return;

	/* Ask the PKCS7 code to calculate the file hash. */
	rc = pkcs7_get_digest(modsig->pkcs7_msg, &modsig->digest,
			      &modsig->digest_size, &modsig->hash_algo);
}

int ima_modsig_verify(struct key *keyring, const struct modsig *modsig)
{
	return verify_pkcs7_message_sig(NULL, 0, modsig->pkcs7_msg, keyring,
					VERIFYING_MODULE_SIGNATURE,
					NULL, NULL);
}

int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
			  const u8 **digest, u32 *digest_size)
{
	*algo = modsig->hash_algo;
	*digest = modsig->digest;
	*digest_size = modsig->digest_size;

	return 0;
}

int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
		       u32 *data_len)
{
	*data = &modsig->raw_pkcs7;
	*data_len = modsig->raw_pkcs7_len;

	return 0;
}

void ima_free_modsig(struct modsig *modsig)
{
	if (!modsig)
		return;

	pkcs7_free_message(modsig->pkcs7_msg);
	kfree(modsig);
}
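/*
 * A minimal sketch, not part of ima_modsig.c above: the appended-signature
 * layout that ima_read_modsig() walks backwards through is
 *
 *   [file contents][PKCS#7 blob][struct module_signature][MODULE_SIG_STRING]
 *
 * The helper below repeats only the offset arithmetic from ima_read_modsig()
 * to locate the raw PKCS#7 blob; the function name and the omission of the
 * mod_check_sig() validation are illustrative simplifications.
 */
static const void *locate_appended_pkcs7(const void *buf, size_t buf_len,
					 size_t *pkcs7_len)
{
	const size_t marker_len = strlen(MODULE_SIG_STRING);
	const struct module_signature *sig;
	size_t sig_len;

	/* The buffer must end with the ASCII marker string. */
	if (buf_len <= marker_len + sizeof(*sig) ||
	    memcmp(buf + buf_len - marker_len, MODULE_SIG_STRING, marker_len))
		return NULL;

	/* struct module_signature sits right before the marker ... */
	sig = buf + buf_len - marker_len - sizeof(*sig);
	sig_len = be32_to_cpu(sig->sig_len);
	if (sig_len + sizeof(*sig) + marker_len > buf_len)
		return NULL;

	/* ... and the sig_len bytes of PKCS#7 data sit right before it. */
	*pkcs7_len = sig_len;
	return (const void *)sig - sig_len;
}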
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH * Copyright 2018-2020, 2022-2024 Intel Corporation */ #include <crypto/utils.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/export.h> #include <net/mac80211.h> #include <linux/unaligned.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "debugfs_key.h" #include "aes_ccm.h" #include "aes_cmac.h" #include "aes_gmac.h" #include "aes_gcm.h" /** * DOC: Key handling basics * * Key handling in mac80211 is done based on per-interface (sub_if_data) * keys and per-station keys. Since each station belongs to an interface, * each station key also belongs to that interface. * * Hardware acceleration is done on a best-effort basis for algorithms * that are implemented in software, for each key the hardware is asked * to enable that key for offloading but if it cannot do that the key is * simply kept for software encryption (unless it is for an algorithm * that isn't implemented in software). * There is currently no way of knowing whether a key is handled in SW * or HW except by looking into debugfs. * * All key management is internally protected by a mutex. Within all * other parts of mac80211, key references are, just as STA structure * references, protected by RCU. Note, however, that some things are * unprotected, namely the key->sta dereferences within the hardware * acceleration functions. This means that sta_info_destroy() must * remove the key which waits for an RCU grace period. */ static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; static void update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) { struct ieee80211_sub_if_data *vlan; if (sdata->vif.type != NL80211_IFTYPE_AP) return; /* crypto_tx_tailroom_needed_cnt is protected by this */ lockdep_assert_wiphy(sdata->local->hw.wiphy); rcu_read_lock(); list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list) vlan->crypto_tx_tailroom_needed_cnt += delta; rcu_read_unlock(); } static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) { /* * When this count is zero, SKB resizing for allocating tailroom * for IV or MMIC is skipped. But, this check has created two race * cases in xmit path while transiting from zero count to one: * * 1. SKB resize was skipped because no key was added but just before * the xmit key is added and SW encryption kicks off. * * 2. SKB resize was skipped because all the keys were hw planted but * just before xmit one of the key is deleted and SW encryption kicks * off. * * In both the above case SW encryption will find not enough space for * tailroom and exits with WARN_ON. (See WARN_ONs at wpa.c) * * Solution has been explained at * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net */ lockdep_assert_wiphy(sdata->local->hw.wiphy); update_vlan_tailroom_need_count(sdata, 1); if (!sdata->crypto_tx_tailroom_needed_cnt++) { /* * Flush all XMIT packets currently using HW encryption or no * encryption at all if the count transition is from 0 -> 1. 
*/ synchronize_net(); } } static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) { lockdep_assert_wiphy(sdata->local->hw.wiphy); WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta); update_vlan_tailroom_need_count(sdata, -delta); sdata->crypto_tx_tailroom_needed_cnt -= delta; } static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) { struct ieee80211_sub_if_data *sdata = key->sdata; struct sta_info *sta; int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(key->local->hw.wiphy); if (key->flags & KEY_FLAG_TAINTED) { /* If we get here, it's during resume and the key is * tainted so shouldn't be used/programmed any more. * However, its flags may still indicate that it was * programmed into the device (since we're in resume) * so clear that flag now to avoid trying to remove * it again later. */ if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && !(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(sdata); key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; return -EINVAL; } if (!key->local->ops->set_key) goto out_unsupported; sta = key->sta; /* * If this is a per-STA GTK, check if it * is supported; if not, return. */ if (sta && !(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE) && !ieee80211_hw_check(&key->local->hw, SUPPORTS_PER_STA_GTK)) goto out_unsupported; if (sta && !sta->uploaded) goto out_unsupported; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { /* * The driver doesn't know anything about VLAN interfaces. * Hence, don't send GTKs for VLAN interfaces to the driver. */ if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { ret = 1; goto out_unsupported; } } if (key->conf.link_id >= 0 && sdata->vif.active_links && !(sdata->vif.active_links & BIT(key->conf.link_id))) return 0; ret = drv_set_key(key->local, SET_KEY, sdata, sta ? &sta->sta : NULL, &key->conf); if (!ret) { key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) decrease_tailroom_need_count(sdata, 1); WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)); return 0; } if (ret != -ENOSPC && ret != -EOPNOTSUPP && ret != 1) sdata_err(sdata, "failed to set key (%d, %pM) to hardware (%d)\n", key->conf.keyidx, sta ? 
sta->sta.addr : bcast_addr, ret); out_unsupported: switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: /* all of these we can do in software - if driver can */ if (ret == 1) return 0; if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) return -EINVAL; return 0; default: return -EINVAL; } } static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) { struct ieee80211_sub_if_data *sdata; struct sta_info *sta; int ret; might_sleep(); if (!key || !key->local->ops->set_key) return; if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return; sta = key->sta; sdata = key->sdata; lockdep_assert_wiphy(key->local->hw.wiphy); if (key->conf.link_id >= 0 && sdata->vif.active_links && !(sdata->vif.active_links & BIT(key->conf.link_id))) return; if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(sdata); key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; ret = drv_set_key(key->local, DISABLE_KEY, sdata, sta ? &sta->sta : NULL, &key->conf); if (ret) sdata_err(sdata, "failed to remove key (%d, %pM) from hardware (%d)\n", key->conf.keyidx, sta ? sta->sta.addr : bcast_addr, ret); } static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force) { struct sta_info *sta = key->sta; struct ieee80211_local *local = key->local; lockdep_assert_wiphy(local->hw.wiphy); set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION); sta->ptk_idx = key->conf.keyidx; if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_check_fast_xmit(sta); return 0; } int ieee80211_set_tx_key(struct ieee80211_key *key) { return _ieee80211_set_tx_key(key, false); } static void ieee80211_pairwise_rekey(struct ieee80211_key *old, struct ieee80211_key *new) { struct ieee80211_local *local = new->local; struct sta_info *sta = new->sta; int i; lockdep_assert_wiphy(local->hw.wiphy); if (new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX) { /* Extended Key ID key install, initial one or rekey */ if (sta->ptk_idx != INVALID_PTK_KEYIDX && !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) { /* Aggregation Sessions with Extended Key ID must not * mix MPDUs with different keyIDs within one A-MPDU. * Tear down running Tx aggregation sessions and block * new Rx/Tx aggregation requests during rekey to * ensure there are no A-MPDUs when the driver is not * supporting A-MPDU key borders. (Blocking Tx only * would be sufficient but WLAN_STA_BLOCK_BA gets the * job done for the few ms we need it.) */ set_sta_flag(sta, WLAN_STA_BLOCK_BA); for (i = 0; i < IEEE80211_NUM_TIDS; i++) __ieee80211_stop_tx_ba_session(sta, i, AGG_STOP_LOCAL_REQUEST); } } else if (old) { /* Rekey without Extended Key ID. * Aggregation sessions are OK when running on SW crypto. * A broken remote STA may cause issues not observed with HW * crypto, though. 
*/ if (!(old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return; /* Stop Tx till we are on the new key */ old->flags |= KEY_FLAG_TAINTED; ieee80211_clear_fast_xmit(sta); if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) { set_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_LOCAL_REQUEST); } if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0)) { pr_warn_ratelimited("Rekeying PTK for STA %pM but driver can't safely do that.", sta->sta.addr); /* Flushing the driver queues *may* help prevent * the clear text leaks and freezes. */ ieee80211_flush_queues(local, old->sdata, false); } } } static void __ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, bool uni, bool multi) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_key *key = NULL; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (idx >= 0 && idx < NUM_DEFAULT_KEYS) { key = wiphy_dereference(sdata->local->hw.wiphy, sdata->keys[idx]); if (!key) key = wiphy_dereference(sdata->local->hw.wiphy, link->gtk[idx]); } if (uni) { rcu_assign_pointer(sdata->default_unicast_key, key); ieee80211_check_fast_xmit_iface(sdata); if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN) drv_set_default_unicast_key(sdata->local, sdata, idx); } if (multi) rcu_assign_pointer(link->default_multicast_key, key); ieee80211_debugfs_key_update_default(sdata); } void ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, bool uni, bool multi) { lockdep_assert_wiphy(link->sdata->local->hw.wiphy); __ieee80211_set_default_key(link, idx, uni, multi); } static void __ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_key *key = NULL; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (idx >= NUM_DEFAULT_KEYS && idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) key = wiphy_dereference(sdata->local->hw.wiphy, link->gtk[idx]); rcu_assign_pointer(link->default_mgmt_key, key); ieee80211_debugfs_key_update_default(sdata); } void ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx) { lockdep_assert_wiphy(link->sdata->local->hw.wiphy); __ieee80211_set_default_mgmt_key(link, idx); } static void __ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_key *key = NULL; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS && idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS) key = wiphy_dereference(sdata->local->hw.wiphy, link->gtk[idx]); rcu_assign_pointer(link->default_beacon_key, key); ieee80211_debugfs_key_update_default(sdata); } void ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx) { lockdep_assert_wiphy(link->sdata->local->hw.wiphy); __ieee80211_set_default_beacon_key(link, idx); } static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, struct sta_info *sta, bool pairwise, struct ieee80211_key *old, struct ieee80211_key *new) { struct link_sta_info *link_sta = sta ? 
&sta->deflink : NULL; int link_id; int idx; int ret = 0; bool defunikey, defmultikey, defmgmtkey, defbeaconkey; bool is_wep; lockdep_assert_wiphy(sdata->local->hw.wiphy); /* caller must provide at least one old/new */ if (WARN_ON(!new && !old)) return 0; if (new) { idx = new->conf.keyidx; is_wep = new->conf.cipher == WLAN_CIPHER_SUITE_WEP40 || new->conf.cipher == WLAN_CIPHER_SUITE_WEP104; link_id = new->conf.link_id; } else { idx = old->conf.keyidx; is_wep = old->conf.cipher == WLAN_CIPHER_SUITE_WEP40 || old->conf.cipher == WLAN_CIPHER_SUITE_WEP104; link_id = old->conf.link_id; } if (WARN(old && old->conf.link_id != link_id, "old link ID %d doesn't match new link ID %d\n", old->conf.link_id, link_id)) return -EINVAL; if (link_id >= 0) { if (!link) { link = sdata_dereference(sdata->link[link_id], sdata); if (!link) return -ENOLINK; } if (sta) { link_sta = rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&sta->local->hw.wiphy->mtx)); if (!link_sta) return -ENOLINK; } } else { link = &sdata->deflink; } if ((is_wep || pairwise) && idx >= NUM_DEFAULT_KEYS) return -EINVAL; WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx); if (new && sta && pairwise) { /* Unicast rekey needs special handling. With Extended Key ID * old is still NULL for the first rekey. */ ieee80211_pairwise_rekey(old, new); } if (old) { if (old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { ieee80211_key_disable_hw_accel(old); if (new) ret = ieee80211_key_enable_hw_accel(new); } } else { if (!new->local->wowlan) ret = ieee80211_key_enable_hw_accel(new); else new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; } if (ret) return ret; if (new) list_add_tail_rcu(&new->list, &sdata->key_list); if (sta) { if (pairwise) { rcu_assign_pointer(sta->ptk[idx], new); if (new && !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) _ieee80211_set_tx_key(new, true); } else { rcu_assign_pointer(link_sta->gtk[idx], new); } /* Only needed for transition from no key -> key. * Still triggers unnecessary when using Extended Key ID * and installing the second key ID the first time. 
*/ if (new && !old) ieee80211_check_fast_rx(sta); } else { defunikey = old && old == wiphy_dereference(sdata->local->hw.wiphy, sdata->default_unicast_key); defmultikey = old && old == wiphy_dereference(sdata->local->hw.wiphy, link->default_multicast_key); defmgmtkey = old && old == wiphy_dereference(sdata->local->hw.wiphy, link->default_mgmt_key); defbeaconkey = old && old == wiphy_dereference(sdata->local->hw.wiphy, link->default_beacon_key); if (defunikey && !new) __ieee80211_set_default_key(link, -1, true, false); if (defmultikey && !new) __ieee80211_set_default_key(link, -1, false, true); if (defmgmtkey && !new) __ieee80211_set_default_mgmt_key(link, -1); if (defbeaconkey && !new) __ieee80211_set_default_beacon_key(link, -1); if (is_wep || pairwise) rcu_assign_pointer(sdata->keys[idx], new); else rcu_assign_pointer(link->gtk[idx], new); if (defunikey && new) __ieee80211_set_default_key(link, new->conf.keyidx, true, false); if (defmultikey && new) __ieee80211_set_default_key(link, new->conf.keyidx, false, true); if (defmgmtkey && new) __ieee80211_set_default_mgmt_key(link, new->conf.keyidx); if (defbeaconkey && new) __ieee80211_set_default_beacon_key(link, new->conf.keyidx); } if (old) list_del_rcu(&old->list); return 0; } struct ieee80211_key * ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, const u8 *key_data, size_t seq_len, const u8 *seq) { struct ieee80211_key *key; int i, j, err; if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS)) return ERR_PTR(-EINVAL); key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); if (!key) return ERR_PTR(-ENOMEM); /* * Default to software encryption; we'll later upload the * key to the hardware if possible. */ key->conf.flags = 0; key->flags = 0; key->conf.link_id = -1; key->conf.cipher = cipher; key->conf.keyidx = idx; key->conf.keylen = key_len; switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: key->conf.iv_len = IEEE80211_WEP_IV_LEN; key->conf.icv_len = IEEE80211_WEP_ICV_LEN; break; case WLAN_CIPHER_SUITE_TKIP: key->conf.iv_len = IEEE80211_TKIP_IV_LEN; key->conf.icv_len = IEEE80211_TKIP_ICV_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { key->u.tkip.rx[i].iv32 = get_unaligned_le32(&seq[2]); key->u.tkip.rx[i].iv16 = get_unaligned_le16(seq); } } spin_lock_init(&key->u.tkip.txlock); break; case WLAN_CIPHER_SUITE_CCMP: key->conf.iv_len = IEEE80211_CCMP_HDR_LEN; key->conf.icv_len = IEEE80211_CCMP_MIC_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_CCMP_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = seq[IEEE80211_CCMP_PN_LEN - j - 1]; } /* * Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( key_data, key_len, IEEE80211_CCMP_MIC_LEN); if (IS_ERR(key->u.ccmp.tfm)) { err = PTR_ERR(key->u.ccmp.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_CCMP_256: key->conf.iv_len = IEEE80211_CCMP_256_HDR_LEN; key->conf.icv_len = IEEE80211_CCMP_256_MIC_LEN; for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_CCMP_256_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = seq[IEEE80211_CCMP_256_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. 
*/ key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( key_data, key_len, IEEE80211_CCMP_256_MIC_LEN); if (IS_ERR(key->u.ccmp.tfm)) { err = PTR_ERR(key->u.ccmp.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: key->conf.iv_len = 0; if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) key->conf.icv_len = sizeof(struct ieee80211_mmie); else key->conf.icv_len = sizeof(struct ieee80211_mmie_16); if (seq) for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++) key->u.aes_cmac.rx_pn[j] = seq[IEEE80211_CMAC_PN_LEN - j - 1]; /* * Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.aes_cmac.tfm = ieee80211_aes_cmac_key_setup(key_data, key_len); if (IS_ERR(key->u.aes_cmac.tfm)) { err = PTR_ERR(key->u.aes_cmac.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: key->conf.iv_len = 0; key->conf.icv_len = sizeof(struct ieee80211_mmie_16); if (seq) for (j = 0; j < IEEE80211_GMAC_PN_LEN; j++) key->u.aes_gmac.rx_pn[j] = seq[IEEE80211_GMAC_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.aes_gmac.tfm = ieee80211_aes_gmac_key_setup(key_data, key_len); if (IS_ERR(key->u.aes_gmac.tfm)) { err = PTR_ERR(key->u.aes_gmac.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: key->conf.iv_len = IEEE80211_GCMP_HDR_LEN; key->conf.icv_len = IEEE80211_GCMP_MIC_LEN; for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_GCMP_PN_LEN; j++) key->u.gcmp.rx_pn[i][j] = seq[IEEE80211_GCMP_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.gcmp.tfm = ieee80211_aes_gcm_key_setup_encrypt(key_data, key_len); if (IS_ERR(key->u.gcmp.tfm)) { err = PTR_ERR(key->u.gcmp.tfm); kfree(key); return ERR_PTR(err); } break; } memcpy(key->conf.key, key_data, key_len); INIT_LIST_HEAD(&key->list); return key; } static void ieee80211_key_free_common(struct ieee80211_key *key) { switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: ieee80211_aes_key_free(key->u.ccmp.tfm); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: ieee80211_aes_gmac_key_free(key->u.aes_gmac.tfm); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: ieee80211_aes_gcm_key_free(key->u.gcmp.tfm); break; } kfree_sensitive(key); } static void __ieee80211_key_destroy(struct ieee80211_key *key, bool delay_tailroom) { if (key->local) { struct ieee80211_sub_if_data *sdata = key->sdata; ieee80211_debugfs_key_remove(key); if (delay_tailroom) { /* see ieee80211_delayed_tailroom_dec */ sdata->crypto_tx_tailroom_pending_dec++; wiphy_delayed_work_queue(sdata->local->hw.wiphy, &sdata->dec_tailroom_needed_wk, HZ / 2); } else { decrease_tailroom_need_count(sdata, 1); } } ieee80211_key_free_common(key); } static void ieee80211_key_destroy(struct ieee80211_key *key, bool delay_tailroom) { if (!key) return; /* * Synchronize so the TX path and rcu key iterators * can no longer be using this key before we free/remove it. 
*/ synchronize_net(); __ieee80211_key_destroy(key, delay_tailroom); } void ieee80211_key_free_unused(struct ieee80211_key *key) { if (!key) return; WARN_ON(key->sdata || key->local); ieee80211_key_free_common(key); } static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, struct ieee80211_key *old, struct ieee80211_key *new) { u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP]; u8 *tk_old, *tk_new; if (!old || new->conf.keylen != old->conf.keylen) return false; tk_old = old->conf.key; tk_new = new->conf.key; /* * In station mode, don't compare the TX MIC key, as it's never used * and offloaded rekeying may not care to send it to the host. This * is the case in iwlwifi, for example. */ if (sdata->vif.type == NL80211_IFTYPE_STATION && new->conf.cipher == WLAN_CIPHER_SUITE_TKIP && new->conf.keylen == WLAN_KEY_LEN_TKIP && !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP); memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP); memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); tk_old = tkip_old; tk_new = tkip_new; } return !crypto_memneq(tk_old, tk_new, new->conf.keylen); } int ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_link_data *link, struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = link->sdata; static atomic_t key_color = ATOMIC_INIT(0); struct ieee80211_key *old_key = NULL; int idx = key->conf.keyidx; bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; /* * We want to delay tailroom updates only for station - in that * case it helps roaming speed, but in other cases it hurts and * can cause warnings to appear. */ bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; int ret; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (sta && pairwise) { struct ieee80211_key *alt_key; old_key = wiphy_dereference(sdata->local->hw.wiphy, sta->ptk[idx]); alt_key = wiphy_dereference(sdata->local->hw.wiphy, sta->ptk[idx ^ 1]); /* The rekey code assumes that the old and new key are using * the same cipher. Enforce the assumption for pairwise keys. */ if ((alt_key && alt_key->conf.cipher != key->conf.cipher) || (old_key && old_key->conf.cipher != key->conf.cipher)) { ret = -EOPNOTSUPP; goto out; } } else if (sta) { struct link_sta_info *link_sta = &sta->deflink; int link_id = key->conf.link_id; if (link_id >= 0) { link_sta = rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&sta->local->hw.wiphy->mtx)); if (!link_sta) { ret = -ENOLINK; goto out; } } old_key = wiphy_dereference(sdata->local->hw.wiphy, link_sta->gtk[idx]); } else { if (idx < NUM_DEFAULT_KEYS) old_key = wiphy_dereference(sdata->local->hw.wiphy, sdata->keys[idx]); if (!old_key) old_key = wiphy_dereference(sdata->local->hw.wiphy, link->gtk[idx]); } /* Non-pairwise keys must also not switch the cipher on rekey */ if (!pairwise) { if (old_key && old_key->conf.cipher != key->conf.cipher) { ret = -EOPNOTSUPP; goto out; } } /* * Silently accept key re-installation without really installing the * new version of the key to avoid nonce reuse or replay issues. */ if (ieee80211_key_identical(sdata, old_key, key)) { ret = -EALREADY; goto out; } key->local = sdata->local; key->sdata = sdata; key->sta = sta; /* * Assign a unique ID to every key so we can easily prevent mixed * key and fragment cache attacks. 
*/ key->color = atomic_inc_return(&key_color); /* keep this flag for easier access later */ if (sta && sta->sta.spp_amsdu) key->conf.flags |= IEEE80211_KEY_FLAG_SPP_AMSDU; increment_tailroom_need_count(sdata); ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key); if (!ret) { ieee80211_debugfs_key_add(key); ieee80211_key_destroy(old_key, delay_tailroom); } else { ieee80211_key_free(key, delay_tailroom); } key = NULL; out: ieee80211_key_free_unused(key); return ret; } void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) { if (!key) return; /* * Replace key with nothingness if it was ever used. */ if (key->sdata) ieee80211_key_replace(key->sdata, NULL, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); ieee80211_key_destroy(key, delay_tailroom); } void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata) { struct ieee80211_key *key; struct ieee80211_sub_if_data *vlan; lockdep_assert_wiphy(sdata->local->hw.wiphy); sdata->crypto_tx_tailroom_needed_cnt = 0; sdata->crypto_tx_tailroom_pending_dec = 0; if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { vlan->crypto_tx_tailroom_needed_cnt = 0; vlan->crypto_tx_tailroom_pending_dec = 0; } } if (ieee80211_sdata_running(sdata)) { list_for_each_entry(key, &sdata->key_list, list) { increment_tailroom_need_count(sdata); ieee80211_key_enable_hw_accel(key); } } } static void ieee80211_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_key *key, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { /* skip keys of station in removal process */ if (key->sta && key->sta->removed) return; if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return; iter(hw, vif, key->sta ? 
&key->sta->sta : NULL, &key->conf, iter_data); } void ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_key *key, *tmp; struct ieee80211_sub_if_data *sdata; lockdep_assert_wiphy(hw->wiphy); if (vif) { sdata = vif_to_sdata(vif); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) ieee80211_key_iter(hw, vif, key, iter, iter_data); } else { list_for_each_entry(sdata, &local->interfaces, list) list_for_each_entry_safe(key, tmp, &sdata->key_list, list) ieee80211_key_iter(hw, &sdata->vif, key, iter, iter_data); } } EXPORT_SYMBOL(ieee80211_iter_keys); static void _ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_sub_if_data *sdata, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_key *key; list_for_each_entry_rcu(key, &sdata->key_list, list) ieee80211_key_iter(hw, &sdata->vif, key, iter, iter_data); } void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; if (vif) { sdata = vif_to_sdata(vif); _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data); } else { list_for_each_entry_rcu(sdata, &local->interfaces, list) _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data); } } EXPORT_SYMBOL(ieee80211_iter_keys_rcu); static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata, struct list_head *keys) { struct ieee80211_key *key, *tmp; decrease_tailroom_need_count(sdata, sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; ieee80211_debugfs_key_remove_mgmt_default(sdata); ieee80211_debugfs_key_remove_beacon_default(sdata); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { ieee80211_key_replace(key->sdata, NULL, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); list_add_tail(&key->list, keys); } ieee80211_debugfs_key_update_default(sdata); } void ieee80211_remove_link_keys(struct ieee80211_link_data *link, struct list_head *keys) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_key *key, *tmp; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { if (key->conf.link_id != link->link_id) continue; ieee80211_key_replace(key->sdata, link, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); list_add_tail(&key->list, keys); } } void ieee80211_free_key_list(struct ieee80211_local *local, struct list_head *keys) { struct ieee80211_key *key, *tmp; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry_safe(key, tmp, keys, list) __ieee80211_key_destroy(key, false); } void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, bool force_synchronize) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *vlan; struct ieee80211_sub_if_data *master; struct ieee80211_key *key, *tmp; LIST_HEAD(keys); wiphy_delayed_work_cancel(local->hw.wiphy, &sdata->dec_tailroom_needed_wk); lockdep_assert_wiphy(local->hw.wiphy); ieee80211_free_keys_iface(sdata, 
&keys); if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) ieee80211_free_keys_iface(vlan, &keys); } if (!list_empty(&keys) || force_synchronize) synchronize_net(); list_for_each_entry_safe(key, tmp, &keys, list) __ieee80211_key_destroy(key, false); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (sdata->bss) { master = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt != master->crypto_tx_tailroom_needed_cnt); } } else { WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || sdata->crypto_tx_tailroom_pending_dec); } if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || vlan->crypto_tx_tailroom_pending_dec); } } void ieee80211_free_sta_keys(struct ieee80211_local *local, struct sta_info *sta) { struct ieee80211_key *key; int i; lockdep_assert_wiphy(local->hw.wiphy); for (i = 0; i < ARRAY_SIZE(sta->deflink.gtk); i++) { key = wiphy_dereference(local->hw.wiphy, sta->deflink.gtk[i]); if (!key) continue; ieee80211_key_replace(key->sdata, NULL, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); __ieee80211_key_destroy(key, key->sdata->vif.type == NL80211_IFTYPE_STATION); } for (i = 0; i < NUM_DEFAULT_KEYS; i++) { key = wiphy_dereference(local->hw.wiphy, sta->ptk[i]); if (!key) continue; ieee80211_key_replace(key->sdata, NULL, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); __ieee80211_key_destroy(key, key->sdata->vif.type == NL80211_IFTYPE_STATION); } } void ieee80211_delayed_tailroom_dec(struct wiphy *wiphy, struct wiphy_work *wk) { struct ieee80211_sub_if_data *sdata; sdata = container_of(wk, struct ieee80211_sub_if_data, dec_tailroom_needed_wk.work); /* * The reason for the delayed tailroom needed decrementing is to * make roaming faster: during roaming, all keys are first deleted * and then new keys are installed. The first new key causes the * crypto_tx_tailroom_needed_cnt to go from 0 to 1, which invokes * the cost of synchronize_net() (which can be slow). Avoid this * by deferring the crypto_tx_tailroom_needed_cnt decrementing on * key removal for a while, so if we roam the value is larger than * zero and no 0->1 transition happens. * * The cost is that if the AP switching was from an AP with keys * to one without, we still allocate tailroom while it would no * longer be needed. However, in the typical (fast) roaming case * within an ESS this usually won't happen. 
*/ decrease_tailroom_need_count(sdata, sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; } void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_gtk_rekey_notify(sdata, bssid, replay_ctr); cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp); } EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify); void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq) { struct ieee80211_key *key; const u8 *pn; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) return; seq->tkip.iv32 = key->u.tkip.rx[tid].iv32; seq->tkip.iv16 = key->u.tkip.rx[tid].iv16; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.ccmp.rx_pn[tid]; memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_gmac.rx_pn; memcpy(seq->aes_gmac.pn, pn, IEEE80211_GMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.gcmp.rx_pn[tid]; memcpy(seq->gcmp.pn, pn, IEEE80211_GCMP_PN_LEN); break; } } EXPORT_SYMBOL(ieee80211_get_key_rx_seq); void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq) { struct ieee80211_key *key; u8 *pn; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) return; key->u.tkip.rx[tid].iv32 = seq->tkip.iv32; key->u.tkip.rx[tid].iv16 = seq->tkip.iv16; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.ccmp.rx_pn[tid]; memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_gmac.rx_pn; memcpy(pn, seq->aes_gmac.pn, IEEE80211_GMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.gcmp.rx_pn[tid]; memcpy(pn, seq->gcmp.pn, IEEE80211_GCMP_PN_LEN); break; default: WARN_ON(1); break; } } EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq); void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) { struct ieee80211_key *key; key = container_of(keyconf, struct ieee80211_key, conf); lockdep_assert_wiphy(key->local->hw.wiphy); /* * if key was uploaded, we assume the driver will/has remove(d) * it, so adjust bookkeeping accordingly */ if 
(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(key->sdata); } ieee80211_key_free(key, false); } EXPORT_SYMBOL_GPL(ieee80211_remove_key); struct ieee80211_key_conf * ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, int link_id) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_key *key; int err; struct ieee80211_link_data *link_data = link_id < 0 ? &sdata->deflink : sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link_data)) return ERR_PTR(-EINVAL); if (WARN_ON(!local->wowlan)) return ERR_PTR(-EINVAL); if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return ERR_PTR(-EINVAL); key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx, keyconf->keylen, keyconf->key, 0, NULL); if (IS_ERR(key)) return ERR_CAST(key); if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; key->conf.link_id = link_data->link_id; err = ieee80211_key_link(key, link_data, NULL); if (err) return ERR_PTR(err); return &key->conf; } EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add); void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf) { struct ieee80211_key *key; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: key->u.aes_cmac.icverrors++; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: key->u.aes_gmac.icverrors++; break; default: /* ignore the others for now, we don't keep counters now */ break; } } EXPORT_SYMBOL_GPL(ieee80211_key_mic_failure); void ieee80211_key_replay(struct ieee80211_key_conf *keyconf) { struct ieee80211_key *key; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: key->u.ccmp.replays++; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: key->u.aes_cmac.replays++; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: key->u.aes_gmac.replays++; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: key->u.gcmp.replays++; break; } } EXPORT_SYMBOL_GPL(ieee80211_key_replay); int ieee80211_key_switch_links(struct ieee80211_sub_if_data *sdata, unsigned long del_links_mask, unsigned long add_links_mask) { struct ieee80211_key *key; int ret; list_for_each_entry(key, &sdata->key_list, list) { if (key->conf.link_id < 0 || !(del_links_mask & BIT(key->conf.link_id))) continue; /* shouldn't happen for per-link keys */ WARN_ON(key->sta); ieee80211_key_disable_hw_accel(key); } list_for_each_entry(key, &sdata->key_list, list) { if (key->conf.link_id < 0 || !(add_links_mask & BIT(key->conf.link_id))) continue; /* shouldn't happen for per-link keys */ WARN_ON(key->sta); ret = ieee80211_key_enable_hw_accel(key); if (ret) return ret; } return 0; }
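/*
 * Illustrative sketch (not part of this file): the iteration helpers
 * exported above (ieee80211_iter_keys() and friends) are what drivers
 * typically use to re-program keys, for example after a firmware restart.
 * struct my_drv_priv and my_drv_hw_install_key() below are hypothetical
 * placeholders, not real mac80211 or driver symbols. ieee80211_iter_keys()
 * must be called with the wiphy mutex held and only visits keys that carry
 * KEY_FLAG_UPLOADED_TO_HARDWARE (see ieee80211_key_iter() above).
 */
struct my_drv_priv;	/* hypothetical driver private data */
void my_drv_hw_install_key(struct my_drv_priv *priv, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *key);

static void my_drv_reprogram_key(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *key,
				 void *data)
{
	struct my_drv_priv *priv = data;	/* hypothetical driver context */

	/* Push the key material back into the device; errors ignored here. */
	my_drv_hw_install_key(priv, vif, sta, key);
}

static void my_drv_restore_keys(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct my_drv_priv *priv)
{
	/* Caller is assumed to hold the wiphy mutex (see lockdep assert). */
	ieee80211_iter_keys(hw, vif, my_drv_reprogram_key, priv);
}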
// SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_ipv4.h> #include <net/netfilter/nf_tables_ipv6.h> static unsigned int nft_nat_do_chain(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nft_pktinfo pkt; nft_set_pktinfo(&pkt, skb, state); switch (state->pf) { #ifdef CONFIG_NF_TABLES_IPV4 case NFPROTO_IPV4: nft_set_pktinfo_ipv4(&pkt); break; #endif #ifdef CONFIG_NF_TABLES_IPV6 case NFPROTO_IPV6: nft_set_pktinfo_ipv6(&pkt); break; #endif default: break; } return nft_do_chain(&pkt, priv); } #ifdef CONFIG_NF_TABLES_IPV4 static const struct nft_chain_type nft_chain_nat_ipv4 = { .name = "nat", .type = NFT_CHAIN_T_NAT, .family = NFPROTO_IPV4, .owner = THIS_MODULE, .hook_mask = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN), .hooks = { [NF_INET_PRE_ROUTING] = nft_nat_do_chain, [NF_INET_POST_ROUTING] = nft_nat_do_chain, [NF_INET_LOCAL_OUT] = nft_nat_do_chain, [NF_INET_LOCAL_IN] = nft_nat_do_chain, }, .ops_register = nf_nat_ipv4_register_fn, .ops_unregister = nf_nat_ipv4_unregister_fn, }; #endif #ifdef CONFIG_NF_TABLES_IPV6 static const struct nft_chain_type nft_chain_nat_ipv6 = { .name = "nat", .type = NFT_CHAIN_T_NAT, .family = NFPROTO_IPV6, .owner = THIS_MODULE, .hook_mask = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN), .hooks = { [NF_INET_PRE_ROUTING] = nft_nat_do_chain, [NF_INET_POST_ROUTING] = nft_nat_do_chain, [NF_INET_LOCAL_OUT] = nft_nat_do_chain, [NF_INET_LOCAL_IN] = nft_nat_do_chain, }, .ops_register = nf_nat_ipv6_register_fn, .ops_unregister = nf_nat_ipv6_unregister_fn, }; #endif #ifdef CONFIG_NF_TABLES_INET static int nft_nat_inet_reg(struct net *net, const struct nf_hook_ops *ops) { return nf_nat_inet_register_fn(net, ops); } static void nft_nat_inet_unreg(struct net *net, const struct nf_hook_ops *ops) { nf_nat_inet_unregister_fn(net, ops); } static const struct nft_chain_type nft_chain_nat_inet = { .name = "nat", .type = NFT_CHAIN_T_NAT, .family = NFPROTO_INET, .owner = THIS_MODULE, .hook_mask = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_POST_ROUTING), .hooks = { [NF_INET_PRE_ROUTING] = nft_nat_do_chain, [NF_INET_LOCAL_IN] = nft_nat_do_chain, [NF_INET_LOCAL_OUT] = nft_nat_do_chain, [NF_INET_POST_ROUTING] = nft_nat_do_chain, }, .ops_register = nft_nat_inet_reg, .ops_unregister = nft_nat_inet_unreg, }; #endif static int __init nft_chain_nat_init(void) { #ifdef CONFIG_NF_TABLES_IPV6 nft_register_chain_type(&nft_chain_nat_ipv6); #endif #ifdef CONFIG_NF_TABLES_IPV4 nft_register_chain_type(&nft_chain_nat_ipv4); #endif #ifdef CONFIG_NF_TABLES_INET nft_register_chain_type(&nft_chain_nat_inet); #endif return 0; } static void __exit nft_chain_nat_exit(void) { #ifdef CONFIG_NF_TABLES_IPV4 nft_unregister_chain_type(&nft_chain_nat_ipv4); #endif #ifdef
CONFIG_NF_TABLES_IPV6 nft_unregister_chain_type(&nft_chain_nat_ipv6); #endif #ifdef CONFIG_NF_TABLES_INET nft_unregister_chain_type(&nft_chain_nat_inet); #endif } module_init(nft_chain_nat_init); module_exit(nft_chain_nat_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("nftables network address translation support"); #ifdef CONFIG_NF_TABLES_IPV4 MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat"); #endif #ifdef CONFIG_NF_TABLES_IPV6 MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat"); #endif #ifdef CONFIG_NF_TABLES_INET MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */ #endif
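/*
 * Usage note (illustrative, not part of this module): the "nat" chain type
 * registered above is what a userspace nftables ruleset selects with
 * "type nat" in a base chain definition. A typical masquerading setup that
 * exercises the NFPROTO_INET variant might look like the following
 * (the interface name is a placeholder):
 *
 *	nft add table inet nat
 *	nft add chain inet nat postrouting '{ type nat hook postrouting priority 100; }'
 *	nft add rule inet nat postrouting oifname "eth0" masquerade
 *
 * The hook names map onto the hook_mask bits above (prerouting, input,
 * output, postrouting), and .ops_register/.ops_unregister attach or detach
 * the chain's hooks via the core NAT helpers (nf_nat_inet_register_fn and
 * friends) when such a base chain is added or removed.
 */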
// SPDX-License-Identifier: GPL-2.0 #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/memblock.h> #include <linux/page_ext.h> #include <linux/memory.h> #include <linux/vmalloc.h> #include <linux/kmemleak.h> #include <linux/page_owner.h> #include <linux/page_idle.h> #include <linux/page_table_check.h> #include <linux/rcupdate.h> #include <linux/pgalloc_tag.h> /* * struct page extension * * This is the feature to manage memory for extended data per page. * * Until now, we must modify struct page itself to store extra data per page. * This requires rebuilding the kernel and it is really time consuming process. * And, sometimes, rebuild is impossible due to third party module dependency. * At last, enlarging struct page could cause un-wanted system behaviour change. * * This feature is intended to overcome above mentioned problems. This feature * allocates memory for extended data per page in certain place rather than * the struct page itself. This memory can be accessed by the accessor * functions provided by this code. During the boot process, it checks whether * allocation of huge chunk of memory is needed or not. If not, it avoids * allocating memory at all. With this advantage, we can include this feature * into the kernel in default and can avoid rebuild and solve related problems.
* * To help these things to work well, there are two callbacks for clients. One * is the need callback which is mandatory if user wants to avoid useless * memory allocation at boot-time. The other is optional, init callback, which * is used to do proper initialization after memory is allocated. * * The need callback is used to decide whether extended memory allocation is * needed or not. Sometimes users want to deactivate some features in this * boot and extra memory would be unnecessary. In this case, to avoid * allocating huge chunk of memory, each clients represent their need of * extra memory through the need callback. If one of the need callbacks * returns true, it means that someone needs extra memory so that * page extension core should allocates memory for page extension. If * none of need callbacks return true, memory isn't needed at all in this boot * and page extension core can skip to allocate memory. As result, * none of memory is wasted. * * When need callback returns true, page_ext checks if there is a request for * extra memory through size in struct page_ext_operations. If it is non-zero, * extra space is allocated for each page_ext entry and offset is returned to * user through offset in struct page_ext_operations. * * The init callback is used to do proper initialization after page extension * is completely initialized. In sparse memory system, extra memory is * allocated some time later than memmap is allocated. In other words, lifetime * of memory for page extension isn't same with memmap for struct page. * Therefore, clients can't store extra data until page extension is * initialized, even if pages are allocated and used freely. This could * cause inadequate state of extra data per page, so, to prevent it, client * can utilize this callback to initialize the state of it correctly. */ #ifdef CONFIG_SPARSEMEM #define PAGE_EXT_INVALID (0x1) #endif #if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) static bool need_page_idle(void) { return true; } static struct page_ext_operations page_idle_ops __initdata = { .need = need_page_idle, .need_shared_flags = true, }; #endif static struct page_ext_operations *page_ext_ops[] __initdata = { #ifdef CONFIG_PAGE_OWNER &page_owner_ops, #endif #if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) &page_idle_ops, #endif #ifdef CONFIG_MEM_ALLOC_PROFILING &page_alloc_tagging_ops, #endif #ifdef CONFIG_PAGE_TABLE_CHECK &page_table_check_ops, #endif }; unsigned long page_ext_size; static unsigned long total_usage; #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG /* * To ensure correct allocation tagging for pages, page_ext should be available * before the first page allocation. Otherwise early task stacks will be * allocated before page_ext initialization and missing tags will be flagged. 
*/ bool early_page_ext __meminitdata = true; #else bool early_page_ext __meminitdata; #endif static int __init setup_early_page_ext(char *str) { early_page_ext = true; return 0; } early_param("early_page_ext", setup_early_page_ext); static bool __init invoke_need_callbacks(void) { int i; int entries = ARRAY_SIZE(page_ext_ops); bool need = false; for (i = 0; i < entries; i++) { if (page_ext_ops[i]->need()) { if (page_ext_ops[i]->need_shared_flags) { page_ext_size = sizeof(struct page_ext); break; } } } for (i = 0; i < entries; i++) { if (page_ext_ops[i]->need()) { page_ext_ops[i]->offset = page_ext_size; page_ext_size += page_ext_ops[i]->size; need = true; } } return need; } static void __init invoke_init_callbacks(void) { int i; int entries = ARRAY_SIZE(page_ext_ops); for (i = 0; i < entries; i++) { if (page_ext_ops[i]->init) page_ext_ops[i]->init(); } } static inline struct page_ext *get_entry(void *base, unsigned long index) { return base + page_ext_size * index; } #ifndef CONFIG_SPARSEMEM void __init page_ext_init_flatmem_late(void) { invoke_init_callbacks(); } void __meminit pgdat_page_ext_init(struct pglist_data *pgdat) { pgdat->node_page_ext = NULL; } static struct page_ext *lookup_page_ext(const struct page *page) { unsigned long pfn = page_to_pfn(page); unsigned long index; struct page_ext *base; WARN_ON_ONCE(!rcu_read_lock_held()); base = NODE_DATA(page_to_nid(page))->node_page_ext; /* * The sanity checks the page allocator does upon freeing a * page can reach here before the page_ext arrays are * allocated when feeding a range of pages to the allocator * for the first time during bootup or memory hotplug. */ if (unlikely(!base)) return NULL; index = pfn - round_down(node_start_pfn(page_to_nid(page)), MAX_ORDER_NR_PAGES); return get_entry(base, index); } static int __init alloc_node_page_ext(int nid) { struct page_ext *base; unsigned long table_size; unsigned long nr_pages; nr_pages = NODE_DATA(nid)->node_spanned_pages; if (!nr_pages) return 0; /* * Need extra space if node range is not aligned with * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm * checks buddy's status, range could be out of exact node range. 
*/ if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) || !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES)) nr_pages += MAX_ORDER_NR_PAGES; table_size = page_ext_size * nr_pages; base = memblock_alloc_try_nid( table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); if (!base) return -ENOMEM; NODE_DATA(nid)->node_page_ext = base; total_usage += table_size; memmap_boot_pages_add(DIV_ROUND_UP(table_size, PAGE_SIZE)); return 0; } void __init page_ext_init_flatmem(void) { int nid, fail; if (!invoke_need_callbacks()) return; for_each_online_node(nid) { fail = alloc_node_page_ext(nid); if (fail) goto fail; } pr_info("allocated %ld bytes of page_ext\n", total_usage); return; fail: pr_crit("allocation of page_ext failed.\n"); panic("Out of memory"); } #else /* CONFIG_SPARSEMEM */ static bool page_ext_invalid(struct page_ext *page_ext) { return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID); } static struct page_ext *lookup_page_ext(const struct page *page) { unsigned long pfn = page_to_pfn(page); struct mem_section *section = __pfn_to_section(pfn); struct page_ext *page_ext = READ_ONCE(section->page_ext); WARN_ON_ONCE(!rcu_read_lock_held()); /* * The sanity checks the page allocator does upon freeing a * page can reach here before the page_ext arrays are * allocated when feeding a range of pages to the allocator * for the first time during bootup or memory hotplug. */ if (page_ext_invalid(page_ext)) return NULL; return get_entry(page_ext, pfn); } static void *__meminit alloc_page_ext(size_t size, int nid) { gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN; void *addr = NULL; addr = alloc_pages_exact_nid(nid, size, flags); if (addr) kmemleak_alloc(addr, size, 1, flags); else addr = vzalloc_node(size, nid); if (addr) memmap_pages_add(DIV_ROUND_UP(size, PAGE_SIZE)); return addr; } static int __meminit init_section_page_ext(unsigned long pfn, int nid) { struct mem_section *section; struct page_ext *base; unsigned long table_size; section = __pfn_to_section(pfn); if (section->page_ext) return 0; table_size = page_ext_size * PAGES_PER_SECTION; base = alloc_page_ext(table_size, nid); /* * The value stored in section->page_ext is (base - pfn) * and it does not point to the memory block allocated above, * causing kmemleak false positives. */ kmemleak_not_leak(base); if (!base) { pr_err("page ext allocation failure\n"); return -ENOMEM; } /* * The passed "pfn" may not be aligned to SECTION. For the calculation * we need to apply a mask. */ pfn &= PAGE_SECTION_MASK; section->page_ext = (void *)base - page_ext_size * pfn; total_usage += table_size; return 0; } static void free_page_ext(void *addr) { size_t table_size; struct page *page; table_size = page_ext_size * PAGES_PER_SECTION; memmap_pages_add(-1L * (DIV_ROUND_UP(table_size, PAGE_SIZE))); if (is_vmalloc_addr(addr)) { vfree(addr); } else { page = virt_to_page(addr); BUG_ON(PageReserved(page)); kmemleak_free(addr); free_pages_exact(addr, table_size); } } static void __free_page_ext(unsigned long pfn) { struct mem_section *ms; struct page_ext *base; ms = __pfn_to_section(pfn); if (!ms || !ms->page_ext) return; base = READ_ONCE(ms->page_ext); /* * page_ext here can be valid while doing the roll back * operation in online_page_ext(). 
*/ if (page_ext_invalid(base)) base = (void *)base - PAGE_EXT_INVALID; WRITE_ONCE(ms->page_ext, NULL); base = get_entry(base, pfn); free_page_ext(base); } static void __invalidate_page_ext(unsigned long pfn) { struct mem_section *ms; void *val; ms = __pfn_to_section(pfn); if (!ms || !ms->page_ext) return; val = (void *)ms->page_ext + PAGE_EXT_INVALID; WRITE_ONCE(ms->page_ext, val); } static int __meminit online_page_ext(unsigned long start_pfn, unsigned long nr_pages, int nid) { unsigned long start, end, pfn; int fail = 0; start = SECTION_ALIGN_DOWN(start_pfn); end = SECTION_ALIGN_UP(start_pfn + nr_pages); if (nid == NUMA_NO_NODE) { /* * In this case, "nid" already exists and contains valid memory. * "start_pfn" passed to us is a pfn which is an arg for * online__pages(), and start_pfn should exist. */ nid = pfn_to_nid(start_pfn); VM_BUG_ON(!node_online(nid)); } for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) fail = init_section_page_ext(pfn, nid); if (!fail) return 0; /* rollback */ end = pfn - PAGES_PER_SECTION; for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) __free_page_ext(pfn); return -ENOMEM; } static void __meminit offline_page_ext(unsigned long start_pfn, unsigned long nr_pages) { unsigned long start, end, pfn; start = SECTION_ALIGN_DOWN(start_pfn); end = SECTION_ALIGN_UP(start_pfn + nr_pages); /* * Freeing of page_ext is done in 3 steps to avoid * use-after-free of it: * 1) Traverse all the sections and mark their page_ext * as invalid. * 2) Wait for all the existing users of page_ext who * started before invalidation to finish. * 3) Free the page_ext. */ for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) __invalidate_page_ext(pfn); synchronize_rcu(); for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) __free_page_ext(pfn); } static int __meminit page_ext_callback(struct notifier_block *self, unsigned long action, void *arg) { struct memory_notify *mn = arg; int ret = 0; switch (action) { case MEM_GOING_ONLINE: ret = online_page_ext(mn->start_pfn, mn->nr_pages, mn->status_change_nid); break; case MEM_OFFLINE: offline_page_ext(mn->start_pfn, mn->nr_pages); break; case MEM_CANCEL_ONLINE: offline_page_ext(mn->start_pfn, mn->nr_pages); break; case MEM_GOING_OFFLINE: break; case MEM_ONLINE: case MEM_CANCEL_OFFLINE: break; } return notifier_from_errno(ret); } void __init page_ext_init(void) { unsigned long pfn; int nid; if (!invoke_need_callbacks()) return; for_each_node_state(nid, N_MEMORY) { unsigned long start_pfn, end_pfn; start_pfn = node_start_pfn(nid); end_pfn = node_end_pfn(nid); /* * start_pfn and end_pfn may not be aligned to SECTION and the * page->flags of out of node pages are not initialized. So we * scan [start_pfn, the biggest section's pfn < end_pfn) here. */ for (pfn = start_pfn; pfn < end_pfn; pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) { if (!pfn_valid(pfn)) continue; /* * Nodes's pfns can be overlapping. * We know some arch can have a nodes layout such as * -------------pfn--------------> * N0 | N1 | N2 | N0 | N1 | N2|.... */ if (pfn_to_nid(pfn) != nid) continue; if (init_section_page_ext(pfn, nid)) goto oom; cond_resched(); } } hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI); pr_info("allocated %ld bytes of page_ext\n", total_usage); invoke_init_callbacks(); return; oom: panic("Out of memory"); } void __meminit pgdat_page_ext_init(struct pglist_data *pgdat) { } #endif /** * page_ext_lookup() - Lookup a page extension for a PFN. * @pfn: PFN of the page we're interested in. 
* * Must be called with RCU read lock taken and @pfn must be valid. * * Return: NULL if no page_ext exists for this page. */ struct page_ext *page_ext_lookup(unsigned long pfn) { return lookup_page_ext(pfn_to_page(pfn)); } /** * page_ext_get() - Get the extended information for a page. * @page: The page we're interested in. * * Ensures that the page_ext will remain valid until page_ext_put() * is called. * * Return: NULL if no page_ext exists for this page. * Context: Any context. Caller may not sleep until they have called * page_ext_put(). */ struct page_ext *page_ext_get(const struct page *page) { struct page_ext *page_ext; rcu_read_lock(); page_ext = lookup_page_ext(page); if (!page_ext) { rcu_read_unlock(); return NULL; } return page_ext; } /** * page_ext_put() - Working with page extended information is done. * @page_ext: Page extended information received from page_ext_get(). * * The page extended information of the page may not be valid after this * function is called. * * Return: None. * Context: Any context with corresponding page_ext_get() is called. */ void page_ext_put(struct page_ext *page_ext) { if (unlikely(!page_ext)) return; rcu_read_unlock(); }
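/*
 * Illustrative sketch (not part of this file): what a minimal page_ext
 * client looks like, following the need/init callback scheme described in
 * the DOC comment above. The my_ext_* names are placeholders; a real
 * client would live in kernel code that includes <linux/page_ext.h> and
 * must also be listed in the page_ext_ops[] array in this file so that
 * invoke_need_callbacks() can assign its ->offset and account for ->size.
 */
struct my_ext_data {
	unsigned long counter;			/* example per-page payload */
};

static bool need_my_ext(void)
{
	/* Usually gated on a boot parameter; unconditionally on here. */
	return true;
}

struct page_ext_operations my_ext_ops = {
	.size	= sizeof(struct my_ext_data),
	.need	= need_my_ext,
	/* .init is optional and omitted in this sketch */
};

/* Typical accessor pattern: pair page_ext_get() with page_ext_put(). */
static void my_ext_inc(struct page *page)
{
	struct page_ext *page_ext = page_ext_get(page);
	struct my_ext_data *data;

	if (!page_ext)
		return;
	data = (void *)page_ext + my_ext_ops.offset;
	data->counter++;
	page_ext_put(page_ext);
}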
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* SMBUS message transfer tracepoints * * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #undef TRACE_SYSTEM #define TRACE_SYSTEM smbus #if !defined(_TRACE_SMBUS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SMBUS_H #include <linux/i2c.h> #include <linux/tracepoint.h> /* * drivers/i2c/i2c-core-smbus.c */ /* * i2c_smbus_xfer() write data or procedure call request */ TRACE_EVENT_CONDITION(smbus_write, TP_PROTO(const struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int protocol, const union i2c_smbus_data *data), TP_ARGS(adap, addr, flags, read_write, command, protocol, data), TP_CONDITION(read_write == I2C_SMBUS_WRITE || protocol == I2C_SMBUS_PROC_CALL || protocol == I2C_SMBUS_BLOCK_PROC_CALL), TP_STRUCT__entry( __field(int, adapter_nr ) __field(__u16, addr ) __field(__u16, flags ) __field(__u8, command ) __field(__u8, len ) __field(__u32, protocol ) __array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ), TP_fast_assign( __entry->adapter_nr = adap->nr; __entry->addr = addr; __entry->flags = flags; __entry->command = command; __entry->protocol = protocol; switch (protocol) { case I2C_SMBUS_BYTE_DATA: __entry->len = 1; goto copy; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: __entry->len = 2; goto copy; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: case I2C_SMBUS_I2C_BLOCK_DATA: __entry->len = data->block[0] + 1; copy: memcpy(__entry->buf, data->block, __entry->len); break; case I2C_SMBUS_QUICK: case I2C_SMBUS_BYTE: case I2C_SMBUS_I2C_BLOCK_BROKEN: default: __entry->len = 0; } ), TP_printk("i2c-%d a=%03x f=%04x c=%x %s l=%u [%*phD]", __entry->adapter_nr, __entry->addr, __entry->flags, __entry->command, __print_symbolic(__entry->protocol, { I2C_SMBUS_QUICK, "QUICK" }, { I2C_SMBUS_BYTE, "BYTE" }, { I2C_SMBUS_BYTE_DATA, "BYTE_DATA" }, { I2C_SMBUS_WORD_DATA, "WORD_DATA" }, { I2C_SMBUS_PROC_CALL, "PROC_CALL" }, { I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" }, { I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" }, { I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" }, { I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }), __entry->len, __entry->len, __entry->buf )); /* * i2c_smbus_xfer() read data request */ TRACE_EVENT_CONDITION(smbus_read, TP_PROTO(const struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int protocol), TP_ARGS(adap, addr, flags, read_write, command, protocol), TP_CONDITION(!(read_write == I2C_SMBUS_WRITE || protocol == I2C_SMBUS_PROC_CALL || protocol == I2C_SMBUS_BLOCK_PROC_CALL)), TP_STRUCT__entry( __field(int, adapter_nr ) __field(__u16,
flags ) __field(__u16, addr ) __field(__u8, command ) __field(__u32, protocol ) __array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ), TP_fast_assign( __entry->adapter_nr = adap->nr; __entry->addr = addr; __entry->flags = flags; __entry->command = command; __entry->protocol = protocol; ), TP_printk("i2c-%d a=%03x f=%04x c=%x %s", __entry->adapter_nr, __entry->addr, __entry->flags, __entry->command, __print_symbolic(__entry->protocol, { I2C_SMBUS_QUICK, "QUICK" }, { I2C_SMBUS_BYTE, "BYTE" }, { I2C_SMBUS_BYTE_DATA, "BYTE_DATA" }, { I2C_SMBUS_WORD_DATA, "WORD_DATA" }, { I2C_SMBUS_PROC_CALL, "PROC_CALL" }, { I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" }, { I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" }, { I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" }, { I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }) )); /* * i2c_smbus_xfer() read data or procedure call reply */ TRACE_EVENT_CONDITION(smbus_reply, TP_PROTO(const struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int protocol, const union i2c_smbus_data *data, int res), TP_ARGS(adap, addr, flags, read_write, command, protocol, data, res), TP_CONDITION(res >= 0 && read_write == I2C_SMBUS_READ), TP_STRUCT__entry( __field(int, adapter_nr ) __field(__u16, addr ) __field(__u16, flags ) __field(__u8, command ) __field(__u8, len ) __field(__u32, protocol ) __array(__u8, buf, I2C_SMBUS_BLOCK_MAX + 2) ), TP_fast_assign( __entry->adapter_nr = adap->nr; __entry->addr = addr; __entry->flags = flags; __entry->command = command; __entry->protocol = protocol; switch (protocol) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: __entry->len = 1; goto copy; case I2C_SMBUS_WORD_DATA: case I2C_SMBUS_PROC_CALL: __entry->len = 2; goto copy; case I2C_SMBUS_BLOCK_DATA: case I2C_SMBUS_BLOCK_PROC_CALL: case I2C_SMBUS_I2C_BLOCK_DATA: __entry->len = data->block[0] + 1; copy: memcpy(__entry->buf, data->block, __entry->len); break; case I2C_SMBUS_QUICK: case I2C_SMBUS_I2C_BLOCK_BROKEN: default: __entry->len = 0; } ), TP_printk("i2c-%d a=%03x f=%04x c=%x %s l=%u [%*phD]", __entry->adapter_nr, __entry->addr, __entry->flags, __entry->command, __print_symbolic(__entry->protocol, { I2C_SMBUS_QUICK, "QUICK" }, { I2C_SMBUS_BYTE, "BYTE" }, { I2C_SMBUS_BYTE_DATA, "BYTE_DATA" }, { I2C_SMBUS_WORD_DATA, "WORD_DATA" }, { I2C_SMBUS_PROC_CALL, "PROC_CALL" }, { I2C_SMBUS_BLOCK_DATA, "BLOCK_DATA" }, { I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" }, { I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" }, { I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }), __entry->len, __entry->len, __entry->buf )); /* * i2c_smbus_xfer() result */ TRACE_EVENT(smbus_result, TP_PROTO(const struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int protocol, int res), TP_ARGS(adap, addr, flags, read_write, command, protocol, res), TP_STRUCT__entry( __field(int, adapter_nr ) __field(__u16, addr ) __field(__u16, flags ) __field(__u8, read_write ) __field(__u8, command ) __field(__s16, res ) __field(__u32, protocol ) ), TP_fast_assign( __entry->adapter_nr = adap->nr; __entry->addr = addr; __entry->flags = flags; __entry->read_write = read_write; __entry->command = command; __entry->protocol = protocol; __entry->res = res; ), TP_printk("i2c-%d a=%03x f=%04x c=%x %s %s res=%d", __entry->adapter_nr, __entry->addr, __entry->flags, __entry->command, __print_symbolic(__entry->protocol, { I2C_SMBUS_QUICK, "QUICK" }, { I2C_SMBUS_BYTE, "BYTE" }, { I2C_SMBUS_BYTE_DATA, "BYTE_DATA" }, { I2C_SMBUS_WORD_DATA, "WORD_DATA" }, { I2C_SMBUS_PROC_CALL, "PROC_CALL" }, { I2C_SMBUS_BLOCK_DATA, 
"BLOCK_DATA" }, { I2C_SMBUS_I2C_BLOCK_BROKEN, "I2C_BLOCK_BROKEN" }, { I2C_SMBUS_BLOCK_PROC_CALL, "BLOCK_PROC_CALL" }, { I2C_SMBUS_I2C_BLOCK_DATA, "I2C_BLOCK_DATA" }), __entry->read_write == I2C_SMBUS_WRITE ? "wr" : "rd", __entry->res )); #endif /* _TRACE_SMBUS_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
// SPDX-License-Identifier: GPL-2.0-only /* * 6pack.c This module implements the 6pack protocol for kernel-based * devices like TTY. It interfaces between a raw TTY and the * kernel's AX.25 protocol layers. * * Authors: Andreas Könsgen <ajk@comnets.uni-bremen.de> * Ralf Baechle DL5RB <ralf@linux-mips.org> * * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by * * Laurence Culhane, <loz@holmes.demon.co.uk> * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> */ #include <linux/module.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/timer.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/semaphore.h> #include <linux/refcount.h> /* sixpack priority commands */ #define SIXP_SEOF 0x40 /* start and end of a 6pack frame */ #define SIXP_TX_URUN 0x48 /* transmit overrun */ #define SIXP_RX_ORUN 0x50 /* receive overrun */ #define SIXP_RX_BUF_OVL 0x58 /* receive buffer overflow */ #define SIXP_CHKSUM 0xFF /* valid checksum of a 6pack frame */ /* masks to get certain bits out of the status bytes sent by the TNC */ #define SIXP_CMD_MASK 0xC0 #define SIXP_CHN_MASK 0x07 #define SIXP_PRIO_CMD_MASK 0x80 #define SIXP_STD_CMD_MASK 0x40 #define SIXP_PRIO_DATA_MASK 0x38 #define SIXP_TX_MASK 0x20 #define SIXP_RX_MASK 0x10 #define SIXP_RX_DCD_MASK 0x18 #define SIXP_LEDS_ON 0x78 #define SIXP_LEDS_OFF 0x60 #define SIXP_CON 0x08 #define SIXP_STA 0x10 #define SIXP_FOUND_TNC 0xe9 #define SIXP_CON_ON 0x68 #define SIXP_DCD_MASK 0x08 #define SIXP_DAMA_OFF 0 /* default level 2 parameters */ #define SIXP_TXDELAY 25 /* 250 ms */ #define SIXP_PERSIST 50 /* in 256ths */ #define SIXP_SLOTTIME 10 /* 100 ms */ #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */ #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */ /* 6pack configuration. */ #define SIXP_NRUNIT 31 /* MAX number of 6pack channels */ #define SIXP_MTU 256 /* Default MTU */ enum sixpack_flags { SIXPF_ERROR, /* Parity, etc. error */ }; struct sixpack { /* Various fields. */ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ /* These are pointers to the malloc()ed frame buffers.
*/ int rcount; /* received chars counter */ unsigned char *xbuff; /* transmitter buffer */ unsigned char *xhead; /* next byte to XMIT */ int xleft; /* bytes left in XMIT queue */ u8 raw_buf[4]; u8 cooked_buf[400]; unsigned int rx_count; unsigned int rx_count_cooked; spinlock_t rxlock; unsigned long flags; /* Flag values/ mode etc */ unsigned char mode; /* 6pack mode */ /* 6pack stuff */ unsigned char tx_delay; unsigned char persistence; unsigned char slottime; unsigned char duplex; unsigned char led_state; u8 status; u8 status1; unsigned char status2; unsigned char tx_enable; unsigned char tnc_state; struct timer_list tx_t; struct timer_list resync_t; refcount_t refcnt; struct completion dead; spinlock_t lock; }; #define AX25_6PACK_HEADER_LEN 0 static void sixpack_decode(struct sixpack *, const u8 *, size_t); static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char); /* * Perform the persistence/slottime algorithm for CSMA access. If the * persistence check was successful, write the data to the serial driver. * Note that in case of DAMA operation, the data is not sent here. */ static void sp_xmit_on_air(struct timer_list *t) { struct sixpack *sp = from_timer(sp, t, tx_t); int actual, when = sp->slottime; static unsigned char random; random = random * 17 + 41; if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); sp->xleft -= actual; sp->xhead += actual; sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->status2 = 0; } else mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100); } /* ----> 6pack timer interrupt handler and friends. <---- */ /* Encapsulate one AX.25 frame and stuff into a TTY queue. */ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len) { unsigned char *msg, *p = icp; int actual, count; if (len > AX25_MTU + 73) { msg = "oversized transmit packet!"; goto out_drop; } if (p[0] > 5) { msg = "invalid KISS command"; goto out_drop; } if ((p[0] != 0) && (len > 2)) { msg = "KISS control packet too long"; goto out_drop; } if ((p[0] == 0) && (len < 15)) { msg = "bad AX.25 packet to transmit"; goto out_drop; } count = encode_sixpack(p, sp->xbuff, len, sp->tx_delay); set_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags); switch (p[0]) { case 1: sp->tx_delay = p[1]; return; case 2: sp->persistence = p[1]; return; case 3: sp->slottime = p[1]; return; case 4: /* ignored */ return; case 5: sp->duplex = p[1]; return; } if (p[0] != 0) return; /* * In case of fullduplex or DAMA operation, we don't take care about the * state of the DCD or of any timers, as the determination of the * correct time to send is the job of the AX.25 layer. We send * immediately after data has arrived. */ if (sp->duplex == 1) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, count); sp->xleft = count - actual; sp->xhead = sp->xbuff + actual; sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); } else { sp->xleft = count; sp->xhead = sp->xbuff; sp->status2 = count; sp_xmit_on_air(&sp->tx_t); } return; out_drop: sp->dev->stats.tx_dropped++; netif_start_queue(sp->dev); if (net_ratelimit()) printk(KERN_DEBUG "%s: %s - dropped.\n", sp->dev->name, msg); } /* Encapsulate an IP datagram and kick it into a TTY queue. 
*/ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); if (skb->protocol == htons(ETH_P_IP)) return ax25_ip_xmit(skb); spin_lock_bh(&sp->lock); /* We were not busy, so we are now... :-) */ netif_stop_queue(dev); dev->stats.tx_bytes += skb->len; sp_encaps(sp, skb->data, skb->len); spin_unlock_bh(&sp->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static int sp_open_dev(struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); if (sp->tty == NULL) return -ENODEV; return 0; } /* Close the low-level part of the 6pack channel. */ static int sp_close(struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); spin_lock_bh(&sp->lock); if (sp->tty) { /* TTY discipline is running. */ clear_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags); } netif_stop_queue(dev); spin_unlock_bh(&sp->lock); return 0; } static int sp_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr_ax25 *sa = addr; netif_tx_lock_bh(dev); netif_addr_lock(dev); __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); return 0; } static const struct net_device_ops sp_netdev_ops = { .ndo_open = sp_open_dev, .ndo_stop = sp_close, .ndo_start_xmit = sp_xmit, .ndo_set_mac_address = sp_set_mac_address, }; static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; /* Only activated in AX.25 mode */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); dev_addr_set(dev, (u8 *)&ax25_defaddr); dev->flags = 0; } /* Send one completely decapsulated IP datagram to the IP layer. */ /* * This is the routine that sends the received data to the kernel AX.25. * 'cmd' is the KISS command. For AX.25 data, it is zero. */ static void sp_bump(struct sixpack *sp, char cmd) { struct sk_buff *skb; int count; u8 *ptr; count = sp->rcount + 1; sp->dev->stats.rx_bytes += count; if ((skb = dev_alloc_skb(count + 1)) == NULL) goto out_mem; ptr = skb_put(skb, count + 1); *ptr++ = cmd; /* KISS command */ memcpy(ptr, sp->cooked_buf + 1, count); skb->protocol = ax25_type_trans(skb, sp->dev); netif_rx(skb); sp->dev->stats.rx_packets++; return; out_mem: sp->dev->stats.rx_dropped++; } /* ----------------------------------------------------------------------- */ /* * We have a potential race on dereferencing tty->disc_data, because the tty * layer provides no locking at all - thus one cpu could be running * sixpack_receive_buf while another calls sixpack_close, which zeroes * tty->disc_data and frees the memory that sixpack_receive_buf is using. The * best way to fix this is to use a rwlock in the tty struct, but for now we * use a single global rwlock for all ttys in ppp line discipline. */ static DEFINE_RWLOCK(disc_data_lock); static struct sixpack *sp_get(struct tty_struct *tty) { struct sixpack *sp; read_lock(&disc_data_lock); sp = tty->disc_data; if (sp) refcount_inc(&sp->refcnt); read_unlock(&disc_data_lock); return sp; } static void sp_put(struct sixpack *sp) { if (refcount_dec_and_test(&sp->refcnt)) complete(&sp->dead); } /* * Called by the TTY driver when there's room for more data. If we have * more packets to send, we send them here. 
*/ static void sixpack_write_wakeup(struct tty_struct *tty) { struct sixpack *sp = sp_get(tty); int actual; if (!sp) return; if (sp->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sp->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); sp->tx_enable = 0; netif_wake_queue(sp->dev); goto out; } if (sp->tx_enable) { actual = tty->ops->write(tty, sp->xhead, sp->xleft); sp->xleft -= actual; sp->xhead += actual; } out: sp_put(sp); } /* ----------------------------------------------------------------------- */ /* * Handle the 'receiver data ready' interrupt. * This function is called by the tty module in the kernel when * a block of 6pack data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. */ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct sixpack *sp; size_t count1; if (!count) return; sp = sp_get(tty); if (!sp) return; /* Read the characters out of the buffer */ count1 = count; while (count) { count--; if (fp && *fp++) { if (!test_and_set_bit(SIXPF_ERROR, &sp->flags)) sp->dev->stats.rx_errors++; continue; } } sixpack_decode(sp, cp, count1); sp_put(sp); tty_unthrottle(tty); } /* * Try to resync the TNC. Called by the resync timer defined in * decode_prio_command */ #define TNC_UNINITIALIZED 0 #define TNC_UNSYNC_STARTUP 1 #define TNC_UNSYNCED 2 #define TNC_IN_SYNC 3 static void __tnc_set_sync_state(struct sixpack *sp, int new_tnc_state) { char *msg; switch (new_tnc_state) { default: /* gcc oh piece-o-crap ... */ case TNC_UNSYNC_STARTUP: msg = "Synchronizing with TNC"; break; case TNC_UNSYNCED: msg = "Lost synchronization with TNC\n"; break; case TNC_IN_SYNC: msg = "Found TNC"; break; } sp->tnc_state = new_tnc_state; printk(KERN_INFO "%s: %s\n", sp->dev->name, msg); } static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state) { int old_tnc_state = sp->tnc_state; if (old_tnc_state != new_tnc_state) __tnc_set_sync_state(sp, new_tnc_state); } static void resync_tnc(struct timer_list *t) { struct sixpack *sp = from_timer(sp, t, resync_t); static char resync_cmd = 0xe8; /* clear any data that might have been received */ sp->rx_count = 0; sp->rx_count_cooked = 0; /* reset state machine */ sp->status = 1; sp->status1 = 1; sp->status2 = 0; /* resync the TNC */ sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tty->ops->write(sp->tty, &resync_cmd, 1); /* Start resync timer again -- the TNC might be still absent */ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT); } static inline int tnc_init(struct sixpack *sp) { unsigned char inbyte = 0xe8; tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP); sp->tty->ops->write(sp->tty, &inbyte, 1); mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT); return 0; } /* * Open the high-level part of the 6pack channel. * This function is called by the TTY module when the * 6pack line discipline is called for. Because we are * sure the tty line exists, we only have to link it to * a free 6pcack channel... 
*/ static int sixpack_open(struct tty_struct *tty) { char *xbuff = NULL; struct net_device *dev; struct sixpack *sp; unsigned long len; int err = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EOPNOTSUPP; dev = alloc_netdev(sizeof(struct sixpack), "sp%d", NET_NAME_UNKNOWN, sp_setup); if (!dev) { err = -ENOMEM; goto out; } sp = netdev_priv(dev); sp->dev = dev; spin_lock_init(&sp->lock); spin_lock_init(&sp->rxlock); refcount_set(&sp->refcnt, 1); init_completion(&sp->dead); /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ len = dev->mtu * 2; xbuff = kmalloc(len + 4, GFP_KERNEL); if (xbuff == NULL) { err = -ENOBUFS; goto out_free; } spin_lock_bh(&sp->lock); sp->tty = tty; sp->xbuff = xbuff; sp->rcount = 0; sp->rx_count = 0; sp->rx_count_cooked = 0; sp->xleft = 0; sp->flags = 0; /* Clear ESCAPE & ERROR flags */ sp->duplex = 0; sp->tx_delay = SIXP_TXDELAY; sp->persistence = SIXP_PERSIST; sp->slottime = SIXP_SLOTTIME; sp->led_state = 0x60; sp->status = 1; sp->status1 = 1; sp->status2 = 0; sp->tx_enable = 0; netif_start_queue(dev); timer_setup(&sp->tx_t, sp_xmit_on_air, 0); timer_setup(&sp->resync_t, resync_tnc, 0); spin_unlock_bh(&sp->lock); /* Done. We have linked the TTY line to a channel. */ tty->disc_data = sp; tty->receive_room = 65536; /* Now we're ready to register. */ err = register_netdev(dev); if (err) goto out_free; tnc_init(sp); return 0; out_free: kfree(xbuff); free_netdev(dev); out: return err; } /* * Close down a 6pack channel. * This means flushing out any pending queues, and then restoring the * TTY line discipline to what it was before it got hooked to 6pack * (which usually is TTY again). */ static void sixpack_close(struct tty_struct *tty) { struct sixpack *sp; write_lock_irq(&disc_data_lock); sp = tty->disc_data; tty->disc_data = NULL; write_unlock_irq(&disc_data_lock); if (!sp) return; /* * We have now ensured that nobody can start using ap from now on, but * we have to wait for all existing users to finish. */ if (!refcount_dec_and_test(&sp->refcnt)) wait_for_completion(&sp->dead); /* We must stop the queue to avoid potentially scribbling * on the free buffers. The sp->dead completion is not sufficient * to protect us from sp->xbuff access. */ netif_stop_queue(sp->dev); unregister_netdev(sp->dev); timer_delete_sync(&sp->tx_t); timer_delete_sync(&sp->resync_t); /* Free all 6pack frame buffers after unreg. */ kfree(sp->xbuff); free_netdev(sp->dev); } /* Perform I/O control on an active 6pack channel. */ static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct sixpack *sp = sp_get(tty); struct net_device *dev; unsigned int tmp, err; if (!sp) return -ENXIO; dev = sp->dev; switch(cmd) { case SIOCGIFNAME: err = copy_to_user((void __user *) arg, dev->name, strlen(dev->name) + 1) ? 
-EFAULT : 0; break; case SIOCGIFENCAP: err = put_user(0, (int __user *) arg); break; case SIOCSIFENCAP: if (get_user(tmp, (int __user *) arg)) { err = -EFAULT; break; } sp->mode = tmp; dev->addr_len = AX25_ADDR_LEN; dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3; dev->type = ARPHRD_AX25; err = 0; break; case SIOCSIFHWADDR: { char addr[AX25_ADDR_LEN]; if (copy_from_user(&addr, (void __user *)arg, AX25_ADDR_LEN)) { err = -EFAULT; break; } netif_tx_lock_bh(dev); __dev_addr_set(dev, &addr, AX25_ADDR_LEN); netif_tx_unlock_bh(dev); err = 0; break; } default: err = tty_mode_ioctl(tty, cmd, arg); } sp_put(sp); return err; } static struct tty_ldisc_ops sp_ldisc = { .owner = THIS_MODULE, .num = N_6PACK, .name = "6pack", .open = sixpack_open, .close = sixpack_close, .ioctl = sixpack_ioctl, .receive_buf = sixpack_receive_buf, .write_wakeup = sixpack_write_wakeup, }; /* Initialize 6pack control device -- register 6pack line discipline */ static int __init sixpack_init_driver(void) { int status; /* Register the provided line protocol discipline */ status = tty_register_ldisc(&sp_ldisc); if (status) pr_err("6pack: can't register line discipline (err = %d)\n", status); return status; } static void __exit sixpack_exit_driver(void) { tty_unregister_ldisc(&sp_ldisc); } /* encode an AX.25 packet into 6pack */ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw, int length, unsigned char tx_delay) { int count = 0; unsigned char checksum = 0, buf[400]; int raw_count = 0; tx_buf_raw[raw_count++] = SIXP_PRIO_CMD_MASK | SIXP_TX_MASK; tx_buf_raw[raw_count++] = SIXP_SEOF; buf[0] = tx_delay; for (count = 1; count < length; count++) buf[count] = tx_buf[count]; for (count = 0; count < length; count++) checksum += buf[count]; buf[length] = (unsigned char) 0xff - checksum; for (count = 0; count <= length; count++) { if ((count % 3) == 0) { tx_buf_raw[raw_count++] = (buf[count] & 0x3f); tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x30); } else if ((count % 3) == 1) { tx_buf_raw[raw_count++] |= (buf[count] & 0x0f); tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x3c); } else { tx_buf_raw[raw_count++] |= (buf[count] & 0x03); tx_buf_raw[raw_count++] = (buf[count] >> 2); } } if ((length % 3) != 2) raw_count++; tx_buf_raw[raw_count++] = SIXP_SEOF; return raw_count; } /* decode 4 sixpack-encoded bytes into 3 data bytes */ static void decode_data(struct sixpack *sp, u8 inbyte) { u8 *buf; if (sp->rx_count != 3) { sp->raw_buf[sp->rx_count++] = inbyte; return; } if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { pr_err("6pack: cooked buffer overrun, data loss\n"); sp->rx_count = 0; return; } buf = sp->raw_buf; sp->cooked_buf[sp->rx_count_cooked++] = buf[0] | ((buf[1] << 2) & 0xc0); sp->cooked_buf[sp->rx_count_cooked++] = (buf[1] & 0x0f) | ((buf[2] << 2) & 0xf0); sp->cooked_buf[sp->rx_count_cooked++] = (buf[2] & 0x03) | (inbyte << 2); sp->rx_count = 0; } /* identify and execute a 6pack priority command byte */ static void decode_prio_command(struct sixpack *sp, u8 cmd) { ssize_t actual; if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */ /* RX and DCD flags can only be set in the same prio command, if the DCD flag has been set without the RX flag in the previous prio command. If DCD has not been set before, something in the transmission has gone wrong. In this case, RX and DCD are cleared in order to prevent the decode_data routine from reading further data that might be corrupt. 
*/ if (((sp->status & SIXP_DCD_MASK) == 0) && ((cmd & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)) { if (sp->status != 1) printk(KERN_DEBUG "6pack: protocol violation\n"); else sp->status = 0; cmd &= ~SIXP_RX_DCD_MASK; } sp->status = cmd & SIXP_PRIO_DATA_MASK; } else { /* output watchdog char if idle */ if ((sp->status2 != 0) && (sp->duplex == 1)) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); sp->xleft -= actual; sp->xhead += actual; sp->led_state = 0x60; sp->status2 = 0; } } /* needed to trigger the TNC watchdog */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); /* if the state byte has been received, the TNC is present, so the resync timer can be reset. */ if (sp->tnc_state == TNC_IN_SYNC) mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT); sp->status1 = cmd & SIXP_PRIO_DATA_MASK; } /* identify and execute a standard 6pack command byte */ static void decode_std_command(struct sixpack *sp, u8 cmd) { u8 checksum = 0, rest = 0; short i; switch (cmd & SIXP_CMD_MASK) { /* normal command */ case SIXP_SEOF: if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) { if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) { sp->led_state = 0x68; sp->tty->ops->write(sp->tty, &sp->led_state, 1); } } else { sp->led_state = 0x60; /* fill trailing bytes with zeroes */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); spin_lock_bh(&sp->rxlock); rest = sp->rx_count; if (rest != 0) for (i = rest; i <= 3; i++) decode_data(sp, 0); if (rest == 2) sp->rx_count_cooked -= 2; else if (rest == 3) sp->rx_count_cooked -= 1; for (i = 0; i < sp->rx_count_cooked; i++) checksum += sp->cooked_buf[i]; if (checksum != SIXP_CHKSUM) { printk(KERN_DEBUG "6pack: bad checksum %2.2x\n", checksum); } else { sp->rcount = sp->rx_count_cooked-2; sp_bump(sp, 0); } sp->rx_count_cooked = 0; spin_unlock_bh(&sp->rxlock); } break; case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n"); break; case SIXP_RX_ORUN: printk(KERN_DEBUG "6pack: RX overrun\n"); break; case SIXP_RX_BUF_OVL: printk(KERN_DEBUG "6pack: RX buffer overflow\n"); } } /* decode a 6pack packet */ static void sixpack_decode(struct sixpack *sp, const u8 *pre_rbuff, size_t count) { size_t count1; u8 inbyte; for (count1 = 0; count1 < count; count1++) { inbyte = pre_rbuff[count1]; if (inbyte == SIXP_FOUND_TNC) { tnc_set_sync_state(sp, TNC_IN_SYNC); timer_delete(&sp->resync_t); } if ((inbyte & SIXP_PRIO_CMD_MASK) != 0) decode_prio_command(sp, inbyte); else if ((inbyte & SIXP_STD_CMD_MASK) != 0) decode_std_command(sp, inbyte); else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) { spin_lock_bh(&sp->rxlock); decode_data(sp, inbyte); spin_unlock_bh(&sp->rxlock); } } } MODULE_AUTHOR("Ralf Baechle DO1GRB <ralf@linux-mips.org>"); MODULE_DESCRIPTION("6pack driver for AX.25"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_6PACK); module_init(sixpack_init_driver); module_exit(sixpack_exit_driver);
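/*
 * Editor's illustrative sketch -- not part of the 6pack driver above.  The
 * heart of the 6pack framing is spreading three 8-bit bytes across four
 * 6-bit "sixpack" bytes; encode_sixpack() does this incrementally per input
 * byte and decode_data() performs the inverse.  The standalone helpers
 * below (u8 as in <linux/types.h>) only restate that bit layout; their
 * names are not kernel symbols.
 */
static void example_pack3(const u8 d[3], u8 s[4])
{
        s[0] = d[0] & 0x3f;                             /* d0 bits 5..0 */
        s[1] = ((d[0] >> 2) & 0x30) | (d[1] & 0x0f);    /* d0 bits 7..6 + d1 bits 3..0 */
        s[2] = ((d[1] >> 2) & 0x3c) | (d[2] & 0x03);    /* d1 bits 7..4 + d2 bits 1..0 */
        s[3] = d[2] >> 2;                               /* d2 bits 7..2 */
}

static void example_unpack4(const u8 s[4], u8 d[3])
{
        /* same expressions as decode_data() */
        d[0] = s[0] | ((s[1] << 2) & 0xc0);
        d[1] = (s[1] & 0x0f) | ((s[2] << 2) & 0xf0);
        d[2] = (s[2] & 0x03) | (s[3] << 2);
}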
// SPDX-License-Identifier: GPL-2.0-only /* * Syscall interface to knfsd. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/slab.h> #include <linux/namei.h> #include <linux/ctype.h> #include <linux/fs_context.h> #include <linux/sunrpc/svcsock.h> #include <linux/lockd/lockd.h> #include <linux/sunrpc/addr.h> #include <linux/sunrpc/gss_api.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/sunrpc/svc.h> #include <linux/module.h> #include <linux/fsnotify.h> #include <linux/nfslocalio.h> #include "idmap.h" #include "nfsd.h" #include "cache.h" #include "state.h" #include "netns.h" #include "pnfs.h" #include "filecache.h" #include "trace.h" #include "netlink.h" /* * We have a single directory with several nodes in it. */ enum { NFSD_Root = 1, NFSD_List, NFSD_Export_Stats, NFSD_Export_features, NFSD_Fh, NFSD_FO_UnlockIP, NFSD_FO_UnlockFS, NFSD_Threads, NFSD_Pool_Threads, NFSD_Pool_Stats, NFSD_Reply_Cache_Stats, NFSD_Versions, NFSD_Ports, NFSD_MaxBlkSize, NFSD_Filecache, NFSD_Leasetime, NFSD_Gracetime, NFSD_RecoveryDir, NFSD_V4EndGrace, NFSD_MaxReserved }; /* * write() for these nodes. */ static ssize_t write_filehandle(struct file *file, char *buf, size_t size); static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size); static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size); static ssize_t write_threads(struct file *file, char *buf, size_t size); static ssize_t write_pool_threads(struct file *file, char *buf, size_t size); static ssize_t write_versions(struct file *file, char *buf, size_t size); static ssize_t write_ports(struct file *file, char *buf, size_t size); static ssize_t write_maxblksize(struct file *file, char *buf, size_t size); #ifdef CONFIG_NFSD_V4 static ssize_t write_leasetime(struct file *file, char *buf, size_t size); static ssize_t write_gracetime(struct file *file, char *buf, size_t size); #ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING static ssize_t write_recoverydir(struct file *file, char *buf, size_t size); #endif static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size); #endif static ssize_t (*const write_op[])(struct file *, char *, size_t) = { [NFSD_Fh] = write_filehandle, [NFSD_FO_UnlockIP] = write_unlock_ip, [NFSD_FO_UnlockFS] = write_unlock_fs, [NFSD_Threads] = write_threads, [NFSD_Pool_Threads] = write_pool_threads, [NFSD_Versions] = write_versions, [NFSD_Ports] = write_ports, [NFSD_MaxBlkSize] = write_maxblksize, #ifdef CONFIG_NFSD_V4 [NFSD_Leasetime] = write_leasetime, [NFSD_Gracetime] = write_gracetime, #ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING [NFSD_RecoveryDir] = write_recoverydir, #endif [NFSD_V4EndGrace] = write_v4_end_grace, #endif }; static ssize_t nfsctl_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos) { ino_t ino = file_inode(file)->i_ino; char *data; ssize_t rv; if (ino >= ARRAY_SIZE(write_op) || !write_op[ino]) return -EINVAL; data = simple_transaction_get(file, buf, size); if (IS_ERR(data)) return PTR_ERR(data); rv = write_op[ino](file, data, size); if (rv < 0) return rv; simple_transaction_set(file, rv); return size; } static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos) { if (!
file->private_data) { /* An attempt to read a transaction file without writing * causes a 0-byte write so that the file can return * state information */ ssize_t rv = nfsctl_transaction_write(file, buf, 0, pos); if (rv < 0) return rv; } return simple_transaction_read(file, buf, size, pos); } static const struct file_operations transaction_ops = { .write = nfsctl_transaction_write, .read = nfsctl_transaction_read, .release = simple_transaction_release, .llseek = default_llseek, }; static int exports_net_open(struct net *net, struct file *file) { int err; struct seq_file *seq; struct nfsd_net *nn = net_generic(net, nfsd_net_id); err = seq_open(file, &nfs_exports_op); if (err) return err; seq = file->private_data; seq->private = nn->svc_export_cache; return 0; } static int exports_nfsd_open(struct inode *inode, struct file *file) { return exports_net_open(inode->i_sb->s_fs_info, file); } static const struct file_operations exports_nfsd_operations = { .open = exports_nfsd_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int export_features_show(struct seq_file *m, void *v) { seq_printf(m, "0x%x 0x%x\n", NFSEXP_ALLFLAGS, NFSEXP_SECINFO_FLAGS); return 0; } DEFINE_SHOW_ATTRIBUTE(export_features); static int nfsd_pool_stats_open(struct inode *inode, struct file *file) { struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id); return svc_pool_stats_open(&nn->nfsd_info, file); } static const struct file_operations pool_stats_operations = { .open = nfsd_pool_stats_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; DEFINE_SHOW_ATTRIBUTE(nfsd_reply_cache_stats); DEFINE_SHOW_ATTRIBUTE(nfsd_file_cache_stats); /*----------------------------------------------------------------------------*/ /* * payload - write methods */ static inline struct net *netns(struct file *file) { return file_inode(file)->i_sb->s_fs_info; } /* * write_unlock_ip - Release all locks used by a client * * Experimental. * * Input: * buf: '\n'-terminated C string containing a * presentation format IP address * size: length of C string in @buf * Output: * On success: returns zero if all specified locks were released; * returns one if one or more locks were not released * On error: return code is negative errno value */ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size) { struct sockaddr_storage address; struct sockaddr *sap = (struct sockaddr *)&address; size_t salen = sizeof(address); char *fo_path; struct net *net = netns(file); /* sanity check */ if (size == 0) return -EINVAL; if (buf[size-1] != '\n') return -EINVAL; fo_path = buf; if (qword_get(&buf, fo_path, size) < 0) return -EINVAL; if (rpc_pton(net, fo_path, size, sap, salen) == 0) return -EINVAL; trace_nfsd_ctl_unlock_ip(net, buf); return nlmsvc_unlock_all_by_ip(sap); } /* * write_unlock_fs - Release all locks on a local file system * * Experimental. 
* * Input: * buf: '\n'-terminated C string containing the * absolute pathname of a local file system * size: length of C string in @buf * Output: * On success: returns zero if all specified locks were released; * returns one if one or more locks were not released * On error: return code is negative errno value */ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size) { struct path path; char *fo_path; int error; /* sanity check */ if (size == 0) return -EINVAL; if (buf[size-1] != '\n') return -EINVAL; fo_path = buf; if (qword_get(&buf, fo_path, size) < 0) return -EINVAL; trace_nfsd_ctl_unlock_fs(netns(file), fo_path); error = kern_path(fo_path, 0, &path); if (error) return error; /* * XXX: Needs better sanity checking. Otherwise we could end up * releasing locks on the wrong file system. * * For example: * 1. Does the path refer to a directory? * 2. Is that directory a mount point, or * 3. Is that directory the root of an exported file system? */ error = nlmsvc_unlock_all_by_sb(path.dentry->d_sb); nfsd4_revoke_states(netns(file), path.dentry->d_sb); path_put(&path); return error; } /* * write_filehandle - Get a variable-length NFS file handle by path * * On input, the buffer contains a '\n'-terminated C string comprised of * three alphanumeric words separated by whitespace. The string may * contain escape sequences. * * Input: * buf: * domain: client domain name * path: export pathname * maxsize: numeric maximum size of * @buf * size: length of C string in @buf * Output: * On success: passed-in buffer filled with '\n'-terminated C * string containing a ASCII hex text version * of the NFS file handle; * return code is the size in bytes of the string * On error: return code is negative errno value */ static ssize_t write_filehandle(struct file *file, char *buf, size_t size) { char *dname, *path; int maxsize; char *mesg = buf; int len; struct auth_domain *dom; struct knfsd_fh fh; if (size == 0) return -EINVAL; if (buf[size-1] != '\n') return -EINVAL; buf[size-1] = 0; dname = mesg; len = qword_get(&mesg, dname, size); if (len <= 0) return -EINVAL; path = dname+len+1; len = qword_get(&mesg, path, size); if (len <= 0) return -EINVAL; len = get_int(&mesg, &maxsize); if (len) return len; if (maxsize < NFS_FHSIZE) return -EINVAL; maxsize = min(maxsize, NFS3_FHSIZE); if (qword_get(&mesg, mesg, size) > 0) return -EINVAL; trace_nfsd_ctl_filehandle(netns(file), dname, path, maxsize); /* we have all the words, they are in buf.. 
*/ dom = unix_domain_find(dname); if (!dom) return -ENOMEM; len = exp_rootfh(netns(file), dom, path, &fh, maxsize); auth_domain_put(dom); if (len) return len; mesg = buf; len = SIMPLE_TRANSACTION_LIMIT; qword_addhex(&mesg, &len, fh.fh_raw, fh.fh_size); mesg[-1] = '\n'; return mesg - buf; } /* * write_threads - Start NFSD, or report the current number of running threads * * Input: * buf: ignored * size: zero * Output: * On success: passed-in buffer filled with '\n'-terminated C * string numeric value representing the number of * running NFSD threads; * return code is the size in bytes of the string * On error: return code is zero * * OR * * Input: * buf: C string containing an unsigned * integer value representing the * number of NFSD threads to start * size: non-zero length of C string in @buf * Output: * On success: NFS service is started; * passed-in buffer filled with '\n'-terminated C * string numeric value representing the number of * running NFSD threads; * return code is the size in bytes of the string * On error: return code is zero or a negative errno value */ static ssize_t write_threads(struct file *file, char *buf, size_t size) { char *mesg = buf; int rv; struct net *net = netns(file); if (size > 0) { int newthreads; rv = get_int(&mesg, &newthreads); if (rv) return rv; if (newthreads < 0) return -EINVAL; trace_nfsd_ctl_threads(net, newthreads); mutex_lock(&nfsd_mutex); rv = nfsd_svc(1, &newthreads, net, file->f_cred, NULL); mutex_unlock(&nfsd_mutex); if (rv < 0) return rv; } else rv = nfsd_nrthreads(net); return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv); } /* * write_pool_threads - Set or report the current number of threads per pool * * Input: * buf: ignored * size: zero * * OR * * Input: * buf: C string containing whitespace- * separated unsigned integer values * representing the number of NFSD * threads to start in each pool * size: non-zero length of C string in @buf * Output: * On success: passed-in buffer filled with '\n'-terminated C * string containing integer values representing the * number of NFSD threads in each pool; * return code is the size in bytes of the string * On error: return code is zero or a negative errno value */ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size) { /* if size > 0, look for an array of number of threads per node * and apply them then write out number of threads per node as reply */ char *mesg = buf; int i; int rv; int len; int npools; int *nthreads; struct net *net = netns(file); mutex_lock(&nfsd_mutex); npools = nfsd_nrpools(net); if (npools == 0) { /* * NFS is shut down. The admin can start it by * writing to the threads file but NOT the pool_threads * file, sorry. Report zero threads. */ mutex_unlock(&nfsd_mutex); strcpy(buf, "0\n"); return strlen(buf); } nthreads = kcalloc(npools, sizeof(int), GFP_KERNEL); rv = -ENOMEM; if (nthreads == NULL) goto out_free; if (size > 0) { for (i = 0; i < npools; i++) { rv = get_int(&mesg, &nthreads[i]); if (rv == -ENOENT) break; /* fewer numbers than pools */ if (rv) goto out_free; /* syntax error */ rv = -EINVAL; if (nthreads[i] < 0) goto out_free; trace_nfsd_ctl_pool_threads(net, i, nthreads[i]); } /* * There must always be a thread in pool 0; the admin * can't shut down NFS completely using pool_threads. 
*/ if (nthreads[0] == 0) nthreads[0] = 1; rv = nfsd_set_nrthreads(i, nthreads, net); if (rv) goto out_free; } rv = nfsd_get_nrthreads(npools, nthreads, net); if (rv) goto out_free; mesg = buf; size = SIMPLE_TRANSACTION_LIMIT; for (i = 0; i < npools && size > 0; i++) { snprintf(mesg, size, "%d%c", nthreads[i], (i == npools-1 ? '\n' : ' ')); len = strlen(mesg); size -= len; mesg += len; } rv = mesg - buf; out_free: kfree(nthreads); mutex_unlock(&nfsd_mutex); return rv; } static ssize_t nfsd_print_version_support(struct nfsd_net *nn, char *buf, int remaining, const char *sep, unsigned vers, int minor) { const char *format = minor < 0 ? "%s%c%u" : "%s%c%u.%u"; bool supported = !!nfsd_vers(nn, vers, NFSD_TEST); if (vers == 4 && minor >= 0 && !nfsd_minorversion(nn, minor, NFSD_TEST)) supported = false; if (minor == 0 && supported) /* * special case for backward compatability. * +4.0 is never reported, it is implied by * +4, unless -4.0 is present. */ return 0; return snprintf(buf, remaining, format, sep, supported ? '+' : '-', vers, minor); } static ssize_t __write_versions(struct file *file, char *buf, size_t size) { char *mesg = buf; char *vers, *minorp, sign; int len, num, remaining; ssize_t tlen = 0; char *sep; struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); if (size > 0) { if (nn->nfsd_serv) /* Cannot change versions without updating * nn->nfsd_serv->sv_xdrsize, and reallocing * rq_argp and rq_resp */ return -EBUSY; if (buf[size-1] != '\n') return -EINVAL; buf[size-1] = 0; trace_nfsd_ctl_version(netns(file), buf); vers = mesg; len = qword_get(&mesg, vers, size); if (len <= 0) return -EINVAL; do { enum vers_op cmd; unsigned minor; sign = *vers; if (sign == '+' || sign == '-') num = simple_strtol((vers+1), &minorp, 0); else num = simple_strtol(vers, &minorp, 0); if (*minorp == '.') { if (num != 4) return -EINVAL; if (kstrtouint(minorp+1, 0, &minor) < 0) return -EINVAL; } cmd = sign == '-' ? NFSD_CLEAR : NFSD_SET; switch(num) { #ifdef CONFIG_NFSD_V2 case 2: #endif case 3: nfsd_vers(nn, num, cmd); break; case 4: if (*minorp == '.') { if (nfsd_minorversion(nn, minor, cmd) < 0) return -EINVAL; } else if ((cmd == NFSD_SET) != nfsd_vers(nn, num, NFSD_TEST)) { /* * Either we have +4 and no minors are enabled, * or we have -4 and at least one minor is enabled. * In either case, propagate 'cmd' to all minors. 
*/ minor = 0; while (nfsd_minorversion(nn, minor, cmd) >= 0) minor++; } break; default: /* Ignore requests to disable non-existent versions */ if (cmd == NFSD_SET) return -EINVAL; } vers += len + 1; } while ((len = qword_get(&mesg, vers, size)) > 0); /* If all get turned off, turn them back on, as * having no versions is BAD */ nfsd_reset_versions(nn); } /* Now write current state into reply buffer */ sep = ""; remaining = SIMPLE_TRANSACTION_LIMIT; for (num=2 ; num <= 4 ; num++) { int minor; if (!nfsd_vers(nn, num, NFSD_AVAIL)) continue; minor = -1; do { len = nfsd_print_version_support(nn, buf, remaining, sep, num, minor); if (len >= remaining) goto out; remaining -= len; buf += len; tlen += len; minor++; if (len) sep = " "; } while (num == 4 && minor <= NFSD_SUPPORTED_MINOR_VERSION); } out: len = snprintf(buf, remaining, "\n"); if (len >= remaining) return -EINVAL; return tlen + len; } /* * write_versions - Set or report the available NFS protocol versions * * Input: * buf: ignored * size: zero * Output: * On success: passed-in buffer filled with '\n'-terminated C * string containing positive or negative integer * values representing the current status of each * protocol version; * return code is the size in bytes of the string * On error: return code is zero or a negative errno value * * OR * * Input: * buf: C string containing whitespace- * separated positive or negative * integer values representing NFS * protocol versions to enable ("+n") * or disable ("-n") * size: non-zero length of C string in @buf * Output: * On success: status of zero or more protocol versions has * been updated; passed-in buffer filled with * '\n'-terminated C string containing positive * or negative integer values representing the * current status of each protocol version; * return code is the size in bytes of the string * On error: return code is zero or a negative errno value */ static ssize_t write_versions(struct file *file, char *buf, size_t size) { ssize_t rv; mutex_lock(&nfsd_mutex); rv = __write_versions(file, buf, size); mutex_unlock(&nfsd_mutex); return rv; } /* * Zero-length write. Return a list of NFSD's current listener * transports. */ static ssize_t __write_ports_names(char *buf, struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); if (nn->nfsd_serv == NULL) return 0; return svc_xprt_names(nn->nfsd_serv, buf, SIMPLE_TRANSACTION_LIMIT); } /* * A single 'fd' number was written, in which case it must be for * a socket of a supported family/protocol, and we use it as an * nfsd listener. */ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred *cred) { char *mesg = buf; int fd, err; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct svc_serv *serv; err = get_int(&mesg, &fd); if (err != 0 || fd < 0) return -EINVAL; trace_nfsd_ctl_ports_addfd(net, fd); err = nfsd_create_serv(net); if (err != 0) return err; serv = nn->nfsd_serv; err = svc_addsock(serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred); if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks)) nfsd_destroy_serv(net); return err; } /* * A transport listener is added by writing its transport name and * a port number. 
*/ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cred *cred) { char transport[16]; struct svc_xprt *xprt; int port, err; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct svc_serv *serv; if (sscanf(buf, "%15s %5u", transport, &port) != 2) return -EINVAL; if (port < 1 || port > USHRT_MAX) return -EINVAL; trace_nfsd_ctl_ports_addxprt(net, transport, port); err = nfsd_create_serv(net); if (err != 0) return err; serv = nn->nfsd_serv; err = svc_xprt_create(serv, transport, net, PF_INET, port, SVC_SOCK_ANONYMOUS, cred); if (err < 0) goto out_err; err = svc_xprt_create(serv, transport, net, PF_INET6, port, SVC_SOCK_ANONYMOUS, cred); if (err < 0 && err != -EAFNOSUPPORT) goto out_close; return 0; out_close: xprt = svc_find_xprt(serv, transport, net, PF_INET, port); if (xprt != NULL) { svc_xprt_close(xprt); svc_xprt_put(xprt); } out_err: if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks)) nfsd_destroy_serv(net); return err; } static ssize_t __write_ports(struct file *file, char *buf, size_t size, struct net *net) { if (size == 0) return __write_ports_names(buf, net); if (isdigit(buf[0])) return __write_ports_addfd(buf, net, file->f_cred); if (isalpha(buf[0])) return __write_ports_addxprt(buf, net, file->f_cred); return -EINVAL; } /* * write_ports - Pass a socket file descriptor or transport name to listen on * * Input: * buf: ignored * size: zero * Output: * On success: passed-in buffer filled with a '\n'-terminated C * string containing a whitespace-separated list of * named NFSD listeners; * return code is the size in bytes of the string * On error: return code is zero or a negative errno value * * OR * * Input: * buf: C string containing an unsigned * integer value representing a bound * but unconnected socket that is to be * used as an NFSD listener; listen(3) * must be called for a SOCK_STREAM * socket, otherwise it is ignored * size: non-zero length of C string in @buf * Output: * On success: NFS service is started; * passed-in buffer filled with a '\n'-terminated C * string containing a unique alphanumeric name of * the listener; * return code is the size in bytes of the string * On error: return code is a negative errno value * * OR * * Input: * buf: C string containing a transport * name and an unsigned integer value * representing the port to listen on, * separated by whitespace * size: non-zero length of C string in @buf * Output: * On success: returns zero; NFS service is started * On error: return code is a negative errno value */ static ssize_t write_ports(struct file *file, char *buf, size_t size) { ssize_t rv; mutex_lock(&nfsd_mutex); rv = __write_ports(file, buf, size, netns(file)); mutex_unlock(&nfsd_mutex); return rv; } int nfsd_max_blksize; /* * write_maxblksize - Set or report the current NFS blksize * * Input: * buf: ignored * size: zero * * OR * * Input: * buf: C string containing an unsigned * integer value representing the new * NFS blksize * size: non-zero length of C string in @buf * Output: * On success: passed-in buffer filled with '\n'-terminated C string * containing numeric value of the current NFS blksize * setting; * return code is the size in bytes of the string * On error: return code is zero or a negative errno value */ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size) { char *mesg = buf; struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); if (size > 0) { int bsize; int rv = get_int(&mesg, &bsize); if (rv) return rv; trace_nfsd_ctl_maxblksize(netns(file), bsize); /* force 
bsize into allowed range and * required alignment. */ bsize = max_t(int, bsize, 1024); bsize = min_t(int, bsize, NFSSVC_MAXBLKSIZE); bsize &= ~(1024-1); mutex_lock(&nfsd_mutex); if (nn->nfsd_serv) { mutex_unlock(&nfsd_mutex); return -EBUSY; } nfsd_max_blksize = bsize; mutex_unlock(&nfsd_mutex); } return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", nfsd_max_blksize); } #ifdef CONFIG_NFSD_V4 static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, time64_t *time, struct nfsd_net *nn) { struct dentry *dentry = file_dentry(file); char *mesg = buf; int rv, i; if (size > 0) { if (nn->nfsd_serv) return -EBUSY; rv = get_int(&mesg, &i); if (rv) return rv; trace_nfsd_ctl_time(netns(file), dentry->d_name.name, dentry->d_name.len, i); /* * Some sanity checking. We don't have a reason for * these particular numbers, but problems with the * extremes are: * - Too short: the briefest network outage may * cause clients to lose all their locks. Also, * the frequent polling may be wasteful. * - Too long: do you really want reboot recovery * to take more than an hour? Or to make other * clients wait an hour before being able to * revoke a dead client's locks? */ if (i < 10 || i > 3600) return -EINVAL; *time = i; } return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%lld\n", *time); } static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size, time64_t *time, struct nfsd_net *nn) { ssize_t rv; mutex_lock(&nfsd_mutex); rv = __nfsd4_write_time(file, buf, size, time, nn); mutex_unlock(&nfsd_mutex); return rv; } /* * write_leasetime - Set or report the current NFSv4 lease time * * Input: * buf: ignored * size: zero * * OR * * Input: * buf: C string containing an unsigned * integer value representing the new * NFSv4 lease expiry time * size: non-zero length of C string in @buf * Output: * On success: passed-in buffer filled with '\n'-terminated C * string containing unsigned integer value of the * current lease expiry time; * return code is the size in bytes of the string * On error: return code is zero or a negative errno value */ static ssize_t write_leasetime(struct file *file, char *buf, size_t size) { struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn); } /* * write_gracetime - Set or report current NFSv4 grace period time * * As above, but sets the time of the NFSv4 grace period. * * Note this should never be set to less than the *previous* * lease-period time, but we don't try to enforce this. (In the common * case (a new boot), we don't know what the previous lease time was * anyway.) 
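 * Like the lease time, the value is validated by __nfsd4_write_time(),
 * so it must lie in the 10..3600 second range and cannot be changed
 * while the server is running.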
*/ static ssize_t write_gracetime(struct file *file, char *buf, size_t size) { struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); return nfsd4_write_time(file, buf, size, &nn->nfsd4_grace, nn); } #ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size, struct nfsd_net *nn) { char *mesg = buf; char *recdir; int len, status; if (size > 0) { if (nn->nfsd_serv) return -EBUSY; if (size > PATH_MAX || buf[size-1] != '\n') return -EINVAL; buf[size-1] = 0; recdir = mesg; len = qword_get(&mesg, recdir, size); if (len <= 0) return -EINVAL; trace_nfsd_ctl_recoverydir(netns(file), recdir); status = nfs4_reset_recoverydir(recdir); if (status) return status; } return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%s\n", nfs4_recoverydir()); } /* * write_recoverydir - Set or report the pathname of the recovery directory * * Input: * buf: ignored * size: zero * * OR * * Input: * buf: C string containing the pathname * of the directory on a local file * system containing permanent NFSv4 * recovery data * size: non-zero length of C string in @buf * Output: * On success: passed-in buffer filled with '\n'-terminated C string * containing the current recovery pathname setting; * return code is the size in bytes of the string * On error: return code is zero or a negative errno value */ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size) { ssize_t rv; struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); mutex_lock(&nfsd_mutex); rv = __write_recoverydir(file, buf, size, nn); mutex_unlock(&nfsd_mutex); return rv; } #endif /* * write_v4_end_grace - release grace period for nfsd's v4.x lock manager * * Input: * buf: ignored * size: zero * OR * * Input: * buf: any value * size: non-zero length of C string in @buf * Output: * passed-in buffer filled with "Y" or "N" with a newline * and NULL-terminated C string. This indicates whether * the grace period has ended in the current net * namespace. Return code is the size in bytes of the * string. Writing a string that starts with 'Y', 'y', or * '1' to the file will end the grace period for nfsd's v4 * lock manager. */ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size) { struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); if (size > 0) { switch(buf[0]) { case 'Y': case 'y': case '1': if (!nn->nfsd_serv) return -EBUSY; trace_nfsd_end_grace(netns(file)); nfsd4_end_grace(nn); break; default: return -EINVAL; } } return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%c\n", nn->grace_ended ? 'Y' : 'N'); } #endif /*----------------------------------------------------------------------------*/ /* * populating the filesystem. */ /* Basically copying rpc_get_inode. 
*/ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode) { struct inode *inode = new_inode(sb); if (!inode) return NULL; /* Following advice from simple_fill_super documentation: */ inode->i_ino = iunique(sb, NFSD_MaxReserved); inode->i_mode = mode; simple_inode_init_ts(inode); switch (mode & S_IFMT) { case S_IFDIR: inode->i_fop = &simple_dir_operations; inode->i_op = &simple_dir_inode_operations; inc_nlink(inode); break; case S_IFLNK: inode->i_op = &simple_symlink_inode_operations; break; default: break; } return inode; } static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl) { struct inode *inode; inode = nfsd_get_inode(dir->i_sb, mode); if (!inode) return -ENOMEM; if (ncl) { inode->i_private = ncl; kref_get(&ncl->cl_ref); } d_add(dentry, inode); inc_nlink(dir); fsnotify_mkdir(dir, dentry); return 0; } static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *ncl, char *name) { struct inode *dir = parent->d_inode; struct dentry *dentry; int ret = -ENOMEM; inode_lock(dir); dentry = d_alloc_name(parent, name); if (!dentry) goto out_err; ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl); if (ret) goto out_err; out: inode_unlock(dir); return dentry; out_err: dput(dentry); dentry = ERR_PTR(ret); goto out; } #if IS_ENABLED(CONFIG_SUNRPC_GSS) static int __nfsd_symlink(struct inode *dir, struct dentry *dentry, umode_t mode, const char *content) { struct inode *inode; inode = nfsd_get_inode(dir->i_sb, mode); if (!inode) return -ENOMEM; inode->i_link = (char *)content; inode->i_size = strlen(content); d_add(dentry, inode); inc_nlink(dir); fsnotify_create(dir, dentry); return 0; } /* * @content is assumed to be a NUL-terminated string that lives * longer than the symlink itself. */ static void _nfsd_symlink(struct dentry *parent, const char *name, const char *content) { struct inode *dir = parent->d_inode; struct dentry *dentry; int ret; inode_lock(dir); dentry = d_alloc_name(parent, name); if (!dentry) goto out; ret = __nfsd_symlink(d_inode(parent), dentry, S_IFLNK | 0777, content); if (ret) dput(dentry); out: inode_unlock(dir); } #else static inline void _nfsd_symlink(struct dentry *parent, const char *name, const char *content) { } #endif static void clear_ncl(struct dentry *dentry) { struct inode *inode = d_inode(dentry); struct nfsdfs_client *ncl = inode->i_private; spin_lock(&inode->i_lock); inode->i_private = NULL; spin_unlock(&inode->i_lock); kref_put(&ncl->cl_ref, ncl->cl_release); } struct nfsdfs_client *get_nfsdfs_client(struct inode *inode) { struct nfsdfs_client *nc; spin_lock(&inode->i_lock); nc = inode->i_private; if (nc) kref_get(&nc->cl_ref); spin_unlock(&inode->i_lock); return nc; } /* XXX: cut'n'paste from simple_fill_super; figure out if we could share * code instead. 
*/ static int nfsdfs_create_files(struct dentry *root, const struct tree_descr *files, struct nfsdfs_client *ncl, struct dentry **fdentries) { struct inode *dir = d_inode(root); struct inode *inode; struct dentry *dentry; int i; inode_lock(dir); for (i = 0; files->name && files->name[0]; i++, files++) { dentry = d_alloc_name(root, files->name); if (!dentry) goto out; inode = nfsd_get_inode(d_inode(root)->i_sb, S_IFREG | files->mode); if (!inode) { dput(dentry); goto out; } kref_get(&ncl->cl_ref); inode->i_fop = files->ops; inode->i_private = ncl; d_add(dentry, inode); fsnotify_create(dir, dentry); if (fdentries) fdentries[i] = dentry; } inode_unlock(dir); return 0; out: inode_unlock(dir); return -ENOMEM; } /* on success, returns positive number unique to that client. */ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn, struct nfsdfs_client *ncl, u32 id, const struct tree_descr *files, struct dentry **fdentries) { struct dentry *dentry; char name[11]; int ret; sprintf(name, "%u", id); dentry = nfsd_mkdir(nn->nfsd_client_dir, ncl, name); if (IS_ERR(dentry)) /* XXX: tossing errors? */ return NULL; ret = nfsdfs_create_files(dentry, files, ncl, fdentries); if (ret) { nfsd_client_rmdir(dentry); return NULL; } return dentry; } /* Taken from __rpc_rmdir: */ void nfsd_client_rmdir(struct dentry *dentry) { simple_recursive_removal(dentry, clear_ncl); } static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc) { struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); struct dentry *dentry; int ret; static const struct tree_descr nfsd_files[] = { [NFSD_List] = {"exports", &exports_nfsd_operations, S_IRUGO}, /* Per-export io stats use same ops as exports file */ [NFSD_Export_Stats] = {"export_stats", &exports_nfsd_operations, S_IRUGO}, [NFSD_Export_features] = {"export_features", &export_features_fops, S_IRUGO}, [NFSD_FO_UnlockIP] = {"unlock_ip", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_FO_UnlockFS] = {"unlock_filesystem", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Pool_Stats] = {"pool_stats", &pool_stats_operations, S_IRUGO}, [NFSD_Reply_Cache_Stats] = {"reply_cache_stats", &nfsd_reply_cache_stats_fops, S_IRUGO}, [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, [NFSD_Filecache] = {"filecache", &nfsd_file_cache_stats_fops, S_IRUGO}, #ifdef CONFIG_NFSD_V4 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR}, #ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING [NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR}, #endif [NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO}, #endif /* last one */ {""} }; ret = simple_fill_super(sb, 0x6e667364, nfsd_files); if (ret) return ret; _nfsd_symlink(sb->s_root, "supported_krb5_enctypes", "/proc/net/rpc/gss_krb5_enctypes"); dentry = nfsd_mkdir(sb->s_root, NULL, "clients"); if (IS_ERR(dentry)) return PTR_ERR(dentry); nn->nfsd_client_dir = dentry; return 0; } static int nfsd_fs_get_tree(struct fs_context *fc) { return get_tree_keyed(fc, nfsd_fill_super, get_net(fc->net_ns)); } static void nfsd_fs_free_fc(struct fs_context *fc) { if (fc->s_fs_info) 
put_net(fc->s_fs_info); } static const struct fs_context_operations nfsd_fs_context_ops = { .free = nfsd_fs_free_fc, .get_tree = nfsd_fs_get_tree, }; static int nfsd_init_fs_context(struct fs_context *fc) { put_user_ns(fc->user_ns); fc->user_ns = get_user_ns(fc->net_ns->user_ns); fc->ops = &nfsd_fs_context_ops; return 0; } static void nfsd_umount(struct super_block *sb) { struct net *net = sb->s_fs_info; nfsd_shutdown_threads(net); kill_litter_super(sb); put_net(net); } static struct file_system_type nfsd_fs_type = { .owner = THIS_MODULE, .name = "nfsd", .init_fs_context = nfsd_init_fs_context, .kill_sb = nfsd_umount, }; MODULE_ALIAS_FS("nfsd"); #ifdef CONFIG_PROC_FS static int exports_proc_open(struct inode *inode, struct file *file) { return exports_net_open(current->nsproxy->net_ns, file); } static const struct proc_ops exports_proc_ops = { .proc_open = exports_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release, }; static int create_proc_exports_entry(void) { struct proc_dir_entry *entry; entry = proc_mkdir("fs/nfs", NULL); if (!entry) return -ENOMEM; entry = proc_create("exports", 0, entry, &exports_proc_ops); if (!entry) { remove_proc_entry("fs/nfs", NULL); return -ENOMEM; } return 0; } #else /* CONFIG_PROC_FS */ static int create_proc_exports_entry(void) { return 0; } #endif unsigned int nfsd_net_id; static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb, struct netlink_callback *cb, struct nfsd_genl_rqstp *rqstp) { void *hdr; u32 i; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &nfsd_nl_family, 0, NFSD_CMD_RPC_STATUS_GET); if (!hdr) return -ENOBUFS; if (nla_put_be32(skb, NFSD_A_RPC_STATUS_XID, rqstp->rq_xid) || nla_put_u32(skb, NFSD_A_RPC_STATUS_FLAGS, rqstp->rq_flags) || nla_put_u32(skb, NFSD_A_RPC_STATUS_PROG, rqstp->rq_prog) || nla_put_u32(skb, NFSD_A_RPC_STATUS_PROC, rqstp->rq_proc) || nla_put_u8(skb, NFSD_A_RPC_STATUS_VERSION, rqstp->rq_vers) || nla_put_s64(skb, NFSD_A_RPC_STATUS_SERVICE_TIME, ktime_to_us(rqstp->rq_stime), NFSD_A_RPC_STATUS_PAD)) return -ENOBUFS; switch (rqstp->rq_saddr.sa_family) { case AF_INET: { const struct sockaddr_in *s_in, *d_in; s_in = (const struct sockaddr_in *)&rqstp->rq_saddr; d_in = (const struct sockaddr_in *)&rqstp->rq_daddr; if (nla_put_in_addr(skb, NFSD_A_RPC_STATUS_SADDR4, s_in->sin_addr.s_addr) || nla_put_in_addr(skb, NFSD_A_RPC_STATUS_DADDR4, d_in->sin_addr.s_addr) || nla_put_be16(skb, NFSD_A_RPC_STATUS_SPORT, s_in->sin_port) || nla_put_be16(skb, NFSD_A_RPC_STATUS_DPORT, d_in->sin_port)) return -ENOBUFS; break; } case AF_INET6: { const struct sockaddr_in6 *s_in, *d_in; s_in = (const struct sockaddr_in6 *)&rqstp->rq_saddr; d_in = (const struct sockaddr_in6 *)&rqstp->rq_daddr; if (nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_SADDR6, &s_in->sin6_addr) || nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_DADDR6, &d_in->sin6_addr) || nla_put_be16(skb, NFSD_A_RPC_STATUS_SPORT, s_in->sin6_port) || nla_put_be16(skb, NFSD_A_RPC_STATUS_DPORT, d_in->sin6_port)) return -ENOBUFS; break; } } for (i = 0; i < rqstp->rq_opcnt; i++) if (nla_put_u32(skb, NFSD_A_RPC_STATUS_COMPOUND_OPS, rqstp->rq_opnum[i])) return -ENOBUFS; genlmsg_end(skb, hdr); return 0; } /** * nfsd_nl_rpc_status_get_dumpit - Handle rpc_status_get dumpit * @skb: reply buffer * @cb: netlink metadata and command arguments * * Returns the size of the reply or a negative errno. 
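 * The dump is resumable: cb->args[0] records the last pool visited and
 * cb->args[1] the index of the last rqstp reported within that pool.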
*/ int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { int i, ret, rqstp_index = 0; struct nfsd_net *nn; mutex_lock(&nfsd_mutex); nn = net_generic(sock_net(skb->sk), nfsd_net_id); if (!nn->nfsd_serv) { ret = -ENODEV; goto out_unlock; } rcu_read_lock(); for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) { struct svc_rqst *rqstp; if (i < cb->args[0]) /* already consumed */ continue; rqstp_index = 0; list_for_each_entry_rcu(rqstp, &nn->nfsd_serv->sv_pools[i].sp_all_threads, rq_all) { struct nfsd_genl_rqstp genl_rqstp; unsigned int status_counter; if (rqstp_index++ < cb->args[1]) /* already consumed */ continue; /* * Acquire rq_status_counter before parsing the rqst * fields. rq_status_counter is set to an odd value in * order to notify the consumers the rqstp fields are * meaningful. */ status_counter = smp_load_acquire(&rqstp->rq_status_counter); if (!(status_counter & 1)) continue; genl_rqstp.rq_xid = rqstp->rq_xid; genl_rqstp.rq_flags = rqstp->rq_flags; genl_rqstp.rq_vers = rqstp->rq_vers; genl_rqstp.rq_prog = rqstp->rq_prog; genl_rqstp.rq_proc = rqstp->rq_proc; genl_rqstp.rq_stime = rqstp->rq_stime; genl_rqstp.rq_opcnt = 0; memcpy(&genl_rqstp.rq_daddr, svc_daddr(rqstp), sizeof(struct sockaddr)); memcpy(&genl_rqstp.rq_saddr, svc_addr(rqstp), sizeof(struct sockaddr)); #ifdef CONFIG_NFSD_V4 if (rqstp->rq_vers == NFS4_VERSION && rqstp->rq_proc == NFSPROC4_COMPOUND) { /* NFSv4 compound */ struct nfsd4_compoundargs *args; int j; args = rqstp->rq_argp; genl_rqstp.rq_opcnt = args->opcnt; for (j = 0; j < genl_rqstp.rq_opcnt; j++) genl_rqstp.rq_opnum[j] = args->ops[j].opnum; } #endif /* CONFIG_NFSD_V4 */ /* * Acquire rq_status_counter before reporting the rqst * fields to the user. */ if (smp_load_acquire(&rqstp->rq_status_counter) != status_counter) continue; ret = nfsd_genl_rpc_status_compose_msg(skb, cb, &genl_rqstp); if (ret) goto out; } } cb->args[0] = i; cb->args[1] = rqstp_index; ret = skb->len; out: rcu_read_unlock(); out_unlock: mutex_unlock(&nfsd_mutex); return ret; } /** * nfsd_nl_threads_set_doit - set the number of running threads * @skb: reply buffer * @info: netlink metadata and command arguments * * Return 0 on success or a negative errno. 
*/ int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info) { int *nthreads, count = 0, nrpools, i, ret = -EOPNOTSUPP, rem; struct net *net = genl_info_net(info); struct nfsd_net *nn = net_generic(net, nfsd_net_id); const struct nlattr *attr; const char *scope = NULL; if (GENL_REQ_ATTR_CHECK(info, NFSD_A_SERVER_THREADS)) return -EINVAL; /* count number of SERVER_THREADS values */ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) { if (nla_type(attr) == NFSD_A_SERVER_THREADS) count++; } mutex_lock(&nfsd_mutex); nrpools = max(count, nfsd_nrpools(net)); nthreads = kcalloc(nrpools, sizeof(int), GFP_KERNEL); if (!nthreads) { ret = -ENOMEM; goto out_unlock; } i = 0; nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) { if (nla_type(attr) == NFSD_A_SERVER_THREADS) { nthreads[i++] = nla_get_u32(attr); if (i >= nrpools) break; } } if (info->attrs[NFSD_A_SERVER_GRACETIME] || info->attrs[NFSD_A_SERVER_LEASETIME] || info->attrs[NFSD_A_SERVER_SCOPE]) { ret = -EBUSY; if (nn->nfsd_serv && nn->nfsd_serv->sv_nrthreads) goto out_unlock; ret = -EINVAL; attr = info->attrs[NFSD_A_SERVER_GRACETIME]; if (attr) { u32 gracetime = nla_get_u32(attr); if (gracetime < 10 || gracetime > 3600) goto out_unlock; nn->nfsd4_grace = gracetime; } attr = info->attrs[NFSD_A_SERVER_LEASETIME]; if (attr) { u32 leasetime = nla_get_u32(attr); if (leasetime < 10 || leasetime > 3600) goto out_unlock; nn->nfsd4_lease = leasetime; } attr = info->attrs[NFSD_A_SERVER_SCOPE]; if (attr) scope = nla_data(attr); } ret = nfsd_svc(nrpools, nthreads, net, get_current_cred(), scope); if (ret > 0) ret = 0; out_unlock: mutex_unlock(&nfsd_mutex); kfree(nthreads); return ret; } /** * nfsd_nl_threads_get_doit - get the number of running threads * @skb: reply buffer * @info: netlink metadata and command arguments * * Return 0 on success or a negative errno. */ int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct nfsd_net *nn = net_generic(net, nfsd_net_id); void *hdr; int err; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOMEM; hdr = genlmsg_iput(skb, info); if (!hdr) { err = -EMSGSIZE; goto err_free_msg; } mutex_lock(&nfsd_mutex); err = nla_put_u32(skb, NFSD_A_SERVER_GRACETIME, nn->nfsd4_grace) || nla_put_u32(skb, NFSD_A_SERVER_LEASETIME, nn->nfsd4_lease) || nla_put_string(skb, NFSD_A_SERVER_SCOPE, nn->nfsd_name); if (err) goto err_unlock; if (nn->nfsd_serv) { int i; for (i = 0; i < nfsd_nrpools(net); ++i) { struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i]; err = nla_put_u32(skb, NFSD_A_SERVER_THREADS, sp->sp_nrthreads); if (err) goto err_unlock; } } else { err = nla_put_u32(skb, NFSD_A_SERVER_THREADS, 0); if (err) goto err_unlock; } mutex_unlock(&nfsd_mutex); genlmsg_end(skb, hdr); return genlmsg_reply(skb, info); err_unlock: mutex_unlock(&nfsd_mutex); err_free_msg: nlmsg_free(skb); return err; } /** * nfsd_nl_version_set_doit - set the nfs enabled versions * @skb: reply buffer * @info: netlink metadata and command arguments * * Return 0 on success or a negative errno. */ int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info) { const struct nlattr *attr; struct nfsd_net *nn; int i, rem; if (GENL_REQ_ATTR_CHECK(info, NFSD_A_SERVER_PROTO_VERSION)) return -EINVAL; mutex_lock(&nfsd_mutex); nn = net_generic(genl_info_net(info), nfsd_net_id); if (nn->nfsd_serv) { mutex_unlock(&nfsd_mutex); return -EBUSY; } /* clear current supported versions. 
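 * (v2, v3 and every supported v4 minor) before enabling only the
 * versions carried in the request's NFSD_A_SERVER_PROTO_VERSION
 * attributes.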
*/ nfsd_vers(nn, 2, NFSD_CLEAR); nfsd_vers(nn, 3, NFSD_CLEAR); for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++) nfsd_minorversion(nn, i, NFSD_CLEAR); nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) { struct nlattr *tb[NFSD_A_VERSION_MAX + 1]; u32 major, minor = 0; bool enabled; if (nla_type(attr) != NFSD_A_SERVER_PROTO_VERSION) continue; if (nla_parse_nested(tb, NFSD_A_VERSION_MAX, attr, nfsd_version_nl_policy, info->extack) < 0) continue; if (!tb[NFSD_A_VERSION_MAJOR]) continue; major = nla_get_u32(tb[NFSD_A_VERSION_MAJOR]); if (tb[NFSD_A_VERSION_MINOR]) minor = nla_get_u32(tb[NFSD_A_VERSION_MINOR]); enabled = nla_get_flag(tb[NFSD_A_VERSION_ENABLED]); switch (major) { case 4: nfsd_minorversion(nn, minor, enabled ? NFSD_SET : NFSD_CLEAR); break; case 3: case 2: if (!minor) nfsd_vers(nn, major, enabled ? NFSD_SET : NFSD_CLEAR); break; default: break; } } mutex_unlock(&nfsd_mutex); return 0; } /** * nfsd_nl_version_get_doit - get the enabled status for all supported nfs versions * @skb: reply buffer * @info: netlink metadata and command arguments * * Return 0 on success or a negative errno. */ int nfsd_nl_version_get_doit(struct sk_buff *skb, struct genl_info *info) { struct nfsd_net *nn; int i, err; void *hdr; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOMEM; hdr = genlmsg_iput(skb, info); if (!hdr) { err = -EMSGSIZE; goto err_free_msg; } mutex_lock(&nfsd_mutex); nn = net_generic(genl_info_net(info), nfsd_net_id); for (i = 2; i <= 4; i++) { int j; for (j = 0; j <= NFSD_SUPPORTED_MINOR_VERSION; j++) { struct nlattr *attr; /* Don't record any versions the kernel doesn't have * compiled in */ if (!nfsd_support_version(i)) continue; /* NFSv{2,3} does not support minor numbers */ if (i < 4 && j) continue; attr = nla_nest_start(skb, NFSD_A_SERVER_PROTO_VERSION); if (!attr) { err = -EINVAL; goto err_nfsd_unlock; } if (nla_put_u32(skb, NFSD_A_VERSION_MAJOR, i) || nla_put_u32(skb, NFSD_A_VERSION_MINOR, j)) { err = -EINVAL; goto err_nfsd_unlock; } /* Set the enabled flag if the version is enabled */ if (nfsd_vers(nn, i, NFSD_TEST) && (i < 4 || nfsd_minorversion(nn, j, NFSD_TEST)) && nla_put_flag(skb, NFSD_A_VERSION_ENABLED)) { err = -EINVAL; goto err_nfsd_unlock; } nla_nest_end(skb, attr); } } mutex_unlock(&nfsd_mutex); genlmsg_end(skb, hdr); return genlmsg_reply(skb, info); err_nfsd_unlock: mutex_unlock(&nfsd_mutex); err_free_msg: nlmsg_free(skb); return err; } /** * nfsd_nl_listener_set_doit - set the nfs running sockets * @skb: reply buffer * @info: netlink metadata and command arguments * * Return 0 on success or a negative errno. 
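 * Changing the listener set requires that the server has no active
 * threads; otherwise the handler returns -EBUSY.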
*/ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct svc_xprt *xprt, *tmp; const struct nlattr *attr; struct svc_serv *serv; LIST_HEAD(permsocks); struct nfsd_net *nn; bool delete = false; int err, rem; mutex_lock(&nfsd_mutex); err = nfsd_create_serv(net); if (err) { mutex_unlock(&nfsd_mutex); return err; } nn = net_generic(net, nfsd_net_id); serv = nn->nfsd_serv; spin_lock_bh(&serv->sv_lock); /* Move all of the old listener sockets to a temp list */ list_splice_init(&serv->sv_permsocks, &permsocks); /* * Walk the list of server_socks from userland and move any that match * back to sv_permsocks */ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) { struct nlattr *tb[NFSD_A_SOCK_MAX + 1]; const char *xcl_name; struct sockaddr *sa; if (nla_type(attr) != NFSD_A_SERVER_SOCK_ADDR) continue; if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr, nfsd_sock_nl_policy, info->extack) < 0) continue; if (!tb[NFSD_A_SOCK_ADDR] || !tb[NFSD_A_SOCK_TRANSPORT_NAME]) continue; if (nla_len(tb[NFSD_A_SOCK_ADDR]) < sizeof(*sa)) continue; xcl_name = nla_data(tb[NFSD_A_SOCK_TRANSPORT_NAME]); sa = nla_data(tb[NFSD_A_SOCK_ADDR]); /* Put back any matching sockets */ list_for_each_entry_safe(xprt, tmp, &permsocks, xpt_list) { /* This shouldn't be possible */ if (WARN_ON_ONCE(xprt->xpt_net != net)) { list_move(&xprt->xpt_list, &serv->sv_permsocks); continue; } /* If everything matches, put it back */ if (!strcmp(xprt->xpt_class->xcl_name, xcl_name) && rpc_cmp_addr_port(sa, (struct sockaddr *)&xprt->xpt_local)) { list_move(&xprt->xpt_list, &serv->sv_permsocks); break; } } } /* * If there are listener transports remaining on the permsocks list, * it means we were asked to remove a listener. */ if (!list_empty(&permsocks)) { list_splice_init(&permsocks, &serv->sv_permsocks); delete = true; } spin_unlock_bh(&serv->sv_lock); /* Do not remove listeners while there are active threads. */ if (serv->sv_nrthreads) { err = -EBUSY; goto out_unlock_mtx; } /* * Since we can't delete an arbitrary llist entry, destroy the * remaining listeners and recreate the list. */ if (delete) svc_xprt_destroy_all(serv, net); /* walk list of addrs again, open any that still don't exist */ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) { struct nlattr *tb[NFSD_A_SOCK_MAX + 1]; const char *xcl_name; struct sockaddr *sa; int ret; if (nla_type(attr) != NFSD_A_SERVER_SOCK_ADDR) continue; if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr, nfsd_sock_nl_policy, info->extack) < 0) continue; if (!tb[NFSD_A_SOCK_ADDR] || !tb[NFSD_A_SOCK_TRANSPORT_NAME]) continue; if (nla_len(tb[NFSD_A_SOCK_ADDR]) < sizeof(*sa)) continue; xcl_name = nla_data(tb[NFSD_A_SOCK_TRANSPORT_NAME]); sa = nla_data(tb[NFSD_A_SOCK_ADDR]); xprt = svc_find_listener(serv, xcl_name, net, sa); if (xprt) { if (delete) WARN_ONCE(1, "Transport type=%s already exists\n", xcl_name); svc_xprt_put(xprt); continue; } ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0, get_current_cred()); /* always save the latest error */ if (ret < 0) err = ret; } if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks)) nfsd_destroy_serv(net); out_unlock_mtx: mutex_unlock(&nfsd_mutex); return err; } /** * nfsd_nl_listener_get_doit - get the nfs running listeners * @skb: reply buffer * @info: netlink metadata and command arguments * * Return 0 on success or a negative errno. 
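 * If no nfsd server is currently running, an empty listener list is
 * returned rather than an error.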
*/ int nfsd_nl_listener_get_doit(struct sk_buff *skb, struct genl_info *info) { struct svc_xprt *xprt; struct svc_serv *serv; struct nfsd_net *nn; void *hdr; int err; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOMEM; hdr = genlmsg_iput(skb, info); if (!hdr) { err = -EMSGSIZE; goto err_free_msg; } mutex_lock(&nfsd_mutex); nn = net_generic(genl_info_net(info), nfsd_net_id); /* no nfs server? Just send empty socket list */ if (!nn->nfsd_serv) goto out_unlock_mtx; serv = nn->nfsd_serv; spin_lock_bh(&serv->sv_lock); list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { struct nlattr *attr; attr = nla_nest_start(skb, NFSD_A_SERVER_SOCK_ADDR); if (!attr) { err = -EINVAL; goto err_serv_unlock; } if (nla_put_string(skb, NFSD_A_SOCK_TRANSPORT_NAME, xprt->xpt_class->xcl_name) || nla_put(skb, NFSD_A_SOCK_ADDR, sizeof(struct sockaddr_storage), &xprt->xpt_local)) { err = -EINVAL; goto err_serv_unlock; } nla_nest_end(skb, attr); } spin_unlock_bh(&serv->sv_lock); out_unlock_mtx: mutex_unlock(&nfsd_mutex); genlmsg_end(skb, hdr); return genlmsg_reply(skb, info); err_serv_unlock: spin_unlock_bh(&serv->sv_lock); mutex_unlock(&nfsd_mutex); err_free_msg: nlmsg_free(skb); return err; } /** * nfsd_nl_pool_mode_set_doit - set the number of running threads * @skb: reply buffer * @info: netlink metadata and command arguments * * Return 0 on success or a negative errno. */ int nfsd_nl_pool_mode_set_doit(struct sk_buff *skb, struct genl_info *info) { const struct nlattr *attr; if (GENL_REQ_ATTR_CHECK(info, NFSD_A_POOL_MODE_MODE)) return -EINVAL; attr = info->attrs[NFSD_A_POOL_MODE_MODE]; return sunrpc_set_pool_mode(nla_data(attr)); } /** * nfsd_nl_pool_mode_get_doit - get info about pool_mode * @skb: reply buffer * @info: netlink metadata and command arguments * * Return 0 on success or a negative errno. */ int nfsd_nl_pool_mode_get_doit(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); char buf[16]; void *hdr; int err; if (sunrpc_get_pool_mode(buf, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) return -ERANGE; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOMEM; err = -EMSGSIZE; hdr = genlmsg_iput(skb, info); if (!hdr) goto err_free_msg; err = nla_put_string(skb, NFSD_A_POOL_MODE_MODE, buf) | nla_put_u32(skb, NFSD_A_POOL_MODE_NPOOLS, nfsd_nrpools(net)); if (err) goto err_free_msg; genlmsg_end(skb, hdr); return genlmsg_reply(skb, info); err_free_msg: nlmsg_free(skb); return err; } /** * nfsd_net_init - Prepare the nfsd_net portion of a new net namespace * @net: a freshly-created network namespace * * This information stays around as long as the network namespace is * alive whether or not there is an NFSD instance running in the * namespace. * * Returns zero on success, or a negative errno otherwise. 
*/ static __net_init int nfsd_net_init(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); int retval; int i; retval = nfsd_export_init(net); if (retval) goto out_export_error; retval = nfsd_idmap_init(net); if (retval) goto out_idmap_error; retval = percpu_counter_init_many(nn->counter, 0, GFP_KERNEL, NFSD_STATS_COUNTERS_NUM); if (retval) goto out_repcache_error; memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats)); nn->nfsd_svcstats.program = &nfsd_programs[0]; if (!nfsd_proc_stat_init(net)) { retval = -ENOMEM; goto out_proc_error; } for (i = 0; i < sizeof(nn->nfsd_versions); i++) nn->nfsd_versions[i] = nfsd_support_version(i); for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++) nn->nfsd4_minorversions[i] = nfsd_support_version(4); nn->nfsd_info.mutex = &nfsd_mutex; nn->nfsd_serv = NULL; nfsd4_init_leases_net(nn); get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key)); seqlock_init(&nn->writeverf_lock); #if IS_ENABLED(CONFIG_NFS_LOCALIO) spin_lock_init(&nn->local_clients_lock); INIT_LIST_HEAD(&nn->local_clients); #endif return 0; out_proc_error: percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM); out_repcache_error: nfsd_idmap_shutdown(net); out_idmap_error: nfsd_export_shutdown(net); out_export_error: return retval; } #if IS_ENABLED(CONFIG_NFS_LOCALIO) /** * nfsd_net_pre_exit - Disconnect localio clients from net namespace * @net: a network namespace that is about to be destroyed * * This invalidates ->net pointers held by localio clients * while they can still safely access nn->counter. */ static __net_exit void nfsd_net_pre_exit(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); nfs_localio_invalidate_clients(&nn->local_clients, &nn->local_clients_lock); } #endif /** * nfsd_net_exit - Release the nfsd_net portion of a net namespace * @net: a network namespace that is about to be destroyed * */ static __net_exit void nfsd_net_exit(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); nfsd_proc_stat_shutdown(net); percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM); nfsd_idmap_shutdown(net); nfsd_export_shutdown(net); } static struct pernet_operations nfsd_net_ops = { .init = nfsd_net_init, #if IS_ENABLED(CONFIG_NFS_LOCALIO) .pre_exit = nfsd_net_pre_exit, #endif .exit = nfsd_net_exit, .id = &nfsd_net_id, .size = sizeof(struct nfsd_net), }; static int __init init_nfsd(void) { int retval; retval = nfsd4_init_slabs(); if (retval) return retval; retval = nfsd4_init_pnfs(); if (retval) goto out_free_slabs; retval = nfsd_drc_slab_create(); if (retval) goto out_free_pnfs; nfsd_lockd_init(); /* lockd->nfsd callbacks */ retval = create_proc_exports_entry(); if (retval) goto out_free_lockd; retval = register_pernet_subsys(&nfsd_net_ops); if (retval < 0) goto out_free_exports; retval = register_cld_notifier(); if (retval) goto out_free_subsys; retval = nfsd4_create_laundry_wq(); if (retval) goto out_free_cld; retval = register_filesystem(&nfsd_fs_type); if (retval) goto out_free_all; retval = genl_register_family(&nfsd_nl_family); if (retval) goto out_free_all; nfsd_localio_ops_init(); return 0; out_free_all: nfsd4_destroy_laundry_wq(); out_free_cld: unregister_cld_notifier(); out_free_subsys: unregister_pernet_subsys(&nfsd_net_ops); out_free_exports: remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); out_free_lockd: nfsd_lockd_shutdown(); nfsd_drc_slab_free(); out_free_pnfs: nfsd4_exit_pnfs(); out_free_slabs: nfsd4_free_slabs(); return retval; } static void __exit 
exit_nfsd(void) { genl_unregister_family(&nfsd_nl_family); unregister_filesystem(&nfsd_fs_type); nfsd4_destroy_laundry_wq(); unregister_cld_notifier(); unregister_pernet_subsys(&nfsd_net_ops); nfsd_drc_slab_free(); remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); nfsd_lockd_shutdown(); nfsd4_free_slabs(); nfsd4_exit_pnfs(); } MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); MODULE_DESCRIPTION("In-kernel NFS server"); MODULE_LICENSE("GPL"); module_init(init_nfsd) module_exit(exit_nfsd)
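/*
 * [Hedged editor example, not part of nfsctl.c] A minimal userspace sketch
 * of the write-then-read transaction pattern the files above implement: a
 * request string is written to a file under the nfsd filesystem (mounted at
 * /proc/fs/nfsd by convention) and the '\n'-terminated reply is read back
 * from the same descriptor. The paths, the query-only usage and the helper
 * name are illustrative assumptions; running it requires root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int nfsd_transaction(const char *path, const char *request,
			    char *reply, size_t replylen)
{
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	/* Write the request, e.g. "+4 +3" to versions or "8" to threads.
	 * A pure query skips the write; reading then reports the current
	 * state via a zero-length transaction. */
	if (request && write(fd, request, strlen(request)) < 0) {
		close(fd);
		return -1;
	}

	n = read(fd, reply, replylen - 1);
	close(fd);
	if (n < 0)
		return -1;
	reply[n] = '\0';
	return 0;
}

int main(void)
{
	char reply[512];

	if (!nfsd_transaction("/proc/fs/nfsd/versions", NULL, reply, sizeof(reply)))
		printf("versions: %s", reply);
	if (!nfsd_transaction("/proc/fs/nfsd/portlist", NULL, reply, sizeof(reply)))
		printf("portlist: %s", reply);
	if (!nfsd_transaction("/proc/fs/nfsd/max_block_size", NULL, reply, sizeof(reply)))
		printf("max_block_size: %s", reply);
	return 0;
}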
855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 // SPDX-License-Identifier: GPL-2.0 /* * RTC subsystem, interface functions * * Copyright (C) 2005 Tower Technologies * Author: Alessandro Zummo <a.zummo@towertech.it> * * based on arch/arm/common/rtctime.c */ #include <linux/rtc.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/log2.h> #include <linux/workqueue.h> #define CREATE_TRACE_POINTS #include <trace/events/rtc.h> static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer); static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer); static void rtc_add_offset(struct rtc_device *rtc, struct rtc_time *tm) { time64_t secs; if (!rtc->offset_secs) return; secs = rtc_tm_to_time64(tm); /* * Since the reading time values from RTC device are always in the RTC * original valid range, but we need to skip the overlapped region * between expanded range and original range, which is no need to add * the offset. */ if ((rtc->start_secs > rtc->range_min && secs >= rtc->start_secs) || (rtc->start_secs < rtc->range_min && secs <= (rtc->start_secs + rtc->range_max - rtc->range_min))) return; rtc_time64_to_tm(secs + rtc->offset_secs, tm); } static void rtc_subtract_offset(struct rtc_device *rtc, struct rtc_time *tm) { time64_t secs; if (!rtc->offset_secs) return; secs = rtc_tm_to_time64(tm); /* * If the setting time values are in the valid range of RTC hardware * device, then no need to subtract the offset when setting time to RTC * device. Otherwise we need to subtract the offset to make the time * values are valid for RTC hardware device. */ if (secs >= rtc->range_min && secs <= rtc->range_max) return; rtc_time64_to_tm(secs - rtc->offset_secs, tm); } static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm) { if (rtc->range_min != rtc->range_max) { time64_t time = rtc_tm_to_time64(tm); time64_t range_min = rtc->set_start_time ? rtc->start_secs : rtc->range_min; timeu64_t range_max = rtc->set_start_time ? 
(rtc->start_secs + rtc->range_max - rtc->range_min) : rtc->range_max; if (time < range_min || time > range_max) return -ERANGE; } return 0; } static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; if (!rtc->ops) { err = -ENODEV; } else if (!rtc->ops->read_time) { err = -EINVAL; } else { memset(tm, 0, sizeof(struct rtc_time)); err = rtc->ops->read_time(rtc->dev.parent, tm); if (err < 0) { dev_dbg(&rtc->dev, "read_time: fail to read: %d\n", err); return err; } rtc_add_offset(rtc, tm); err = rtc_valid_tm(tm); if (err < 0) dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n"); } return err; } int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; err = __rtc_read_time(rtc, tm); mutex_unlock(&rtc->ops_lock); trace_rtc_read_time(rtc_tm_to_time64(tm), err); return err; } EXPORT_SYMBOL_GPL(rtc_read_time); int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm) { int err, uie; err = rtc_valid_tm(tm); if (err != 0) return err; err = rtc_valid_range(rtc, tm); if (err) return err; rtc_subtract_offset(rtc, tm); #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active; #else uie = rtc->uie_rtctimer.enabled; #endif if (uie) { err = rtc_update_irq_enable(rtc, 0); if (err) return err; } err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (!rtc->ops) err = -ENODEV; else if (rtc->ops->set_time) err = rtc->ops->set_time(rtc->dev.parent, tm); else err = -EINVAL; pm_stay_awake(rtc->dev.parent); mutex_unlock(&rtc->ops_lock); /* A timer might have just expired */ schedule_work(&rtc->irqwork); if (uie) { err = rtc_update_irq_enable(rtc, 1); if (err) return err; } trace_rtc_set_time(rtc_tm_to_time64(tm), err); return err; } EXPORT_SYMBOL_GPL(rtc_set_time); static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (!rtc->ops) { err = -ENODEV; } else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) { err = -EINVAL; } else { alarm->enabled = 0; alarm->pending = 0; alarm->time.tm_sec = -1; alarm->time.tm_min = -1; alarm->time.tm_hour = -1; alarm->time.tm_mday = -1; alarm->time.tm_mon = -1; alarm->time.tm_year = -1; alarm->time.tm_wday = -1; alarm->time.tm_yday = -1; alarm->time.tm_isdst = -1; err = rtc->ops->read_alarm(rtc->dev.parent, alarm); } mutex_unlock(&rtc->ops_lock); trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err); return err; } int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; struct rtc_time before, now; int first_time = 1; time64_t t_now, t_alm; enum { none, day, month, year } missing = none; unsigned int days; /* The lower level RTC driver may return -1 in some fields, * creating invalid alarm->time values, for reasons like: * * - The hardware may not be capable of filling them in; * many alarms match only on time-of-day fields, not * day/month/year calendar data. * * - Some hardware uses illegal values as "wildcard" match * values, which non-Linux firmware (like a BIOS) may try * to set up as e.g. "alarm 15 minutes after each hour". * Linux uses only oneshot alarms. * * When we see that here, we deal with it by using values from * a current RTC timestamp for any missing (-1) values. The * RTC driver prevents "periodic alarm" modes. 
* * But this can be racey, because some fields of the RTC timestamp * may have wrapped in the interval since we read the RTC alarm, * which would lead to us inserting inconsistent values in place * of the -1 fields. * * Reading the alarm and timestamp in the reverse sequence * would have the same race condition, and not solve the issue. * * So, we must first read the RTC timestamp, * then read the RTC alarm value, * and then read a second RTC timestamp. * * If any fields of the second timestamp have changed * when compared with the first timestamp, then we know * our timestamp may be inconsistent with that used by * the low-level rtc_read_alarm_internal() function. * * So, when the two timestamps disagree, we just loop and do * the process again to get a fully consistent set of values. * * This could all instead be done in the lower level driver, * but since more than one lower level RTC implementation needs it, * then it's probably best to do it here instead of there.. */ /* Get the "before" timestamp */ err = rtc_read_time(rtc, &before); if (err < 0) return err; do { if (!first_time) memcpy(&before, &now, sizeof(struct rtc_time)); first_time = 0; /* get the RTC alarm values, which may be incomplete */ err = rtc_read_alarm_internal(rtc, alarm); if (err) return err; /* full-function RTCs won't have such missing fields */ err = rtc_valid_tm(&alarm->time); if (!err) goto done; /* get the "after" timestamp, to detect wrapped fields */ err = rtc_read_time(rtc, &now); if (err < 0) return err; /* note that tm_sec is a "don't care" value here: */ } while (before.tm_min != now.tm_min || before.tm_hour != now.tm_hour || before.tm_mon != now.tm_mon || before.tm_year != now.tm_year); /* Fill in the missing alarm fields using the timestamp; we * know there's at least one since alarm->time is invalid. */ if (alarm->time.tm_sec == -1) alarm->time.tm_sec = now.tm_sec; if (alarm->time.tm_min == -1) alarm->time.tm_min = now.tm_min; if (alarm->time.tm_hour == -1) alarm->time.tm_hour = now.tm_hour; /* For simplicity, only support date rollover for now */ if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) { alarm->time.tm_mday = now.tm_mday; missing = day; } if ((unsigned int)alarm->time.tm_mon >= 12) { alarm->time.tm_mon = now.tm_mon; if (missing == none) missing = month; } if (alarm->time.tm_year == -1) { alarm->time.tm_year = now.tm_year; if (missing == none) missing = year; } /* Can't proceed if alarm is still invalid after replacing * missing fields. */ err = rtc_valid_tm(&alarm->time); if (err) goto done; /* with luck, no rollover is needed */ t_now = rtc_tm_to_time64(&now); t_alm = rtc_tm_to_time64(&alarm->time); if (t_now < t_alm) goto done; switch (missing) { /* 24 hour rollover ... if it's now 10am Monday, an alarm that * that will trigger at 5am will do so at 5am Tuesday, which * could also be in the next month or year. This is a common * case, especially for PCs. */ case day: dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); t_alm += 24 * 60 * 60; rtc_time64_to_tm(t_alm, &alarm->time); break; /* Month rollover ... if it's the 31th, an alarm on the 3rd will * be next month. An alarm matching on the 30th, 29th, or 28th * may end up in the month after that! Many newer PCs support * this type of alarm. 
*/ case month: dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month"); do { if (alarm->time.tm_mon < 11) { alarm->time.tm_mon++; } else { alarm->time.tm_mon = 0; alarm->time.tm_year++; } days = rtc_month_days(alarm->time.tm_mon, alarm->time.tm_year); } while (days < alarm->time.tm_mday); break; /* Year rollover ... easy except for leap years! */ case year: dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year"); do { alarm->time.tm_year++; } while (!is_leap_year(alarm->time.tm_year + 1900) && rtc_valid_tm(&alarm->time) != 0); break; default: dev_warn(&rtc->dev, "alarm rollover not handled\n"); } err = rtc_valid_tm(&alarm->time); done: if (err && alarm->enabled) dev_warn(&rtc->dev, "invalid alarm value: %ptR\n", &alarm->time); else rtc_add_offset(rtc, &alarm->time); return err; } int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (!rtc->ops) { err = -ENODEV; } else if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) { err = -EINVAL; } else { memset(alarm, 0, sizeof(struct rtc_wkalrm)); alarm->enabled = rtc->aie_timer.enabled; alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires); } mutex_unlock(&rtc->ops_lock); trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err); return err; } EXPORT_SYMBOL_GPL(rtc_read_alarm); static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { struct rtc_time tm; time64_t now, scheduled; int err; err = rtc_valid_tm(&alarm->time); if (err) return err; scheduled = rtc_tm_to_time64(&alarm->time); /* Make sure we're not setting alarms in the past */ err = __rtc_read_time(rtc, &tm); if (err) return err; now = rtc_tm_to_time64(&tm); if (scheduled <= now) return -ETIME; /* * XXX - We just checked to make sure the alarm time is not * in the past, but there is still a race window where if * the is alarm set for the next second and the second ticks * over right here, before we set the alarm. 
*/ rtc_subtract_offset(rtc, &alarm->time); if (!rtc->ops) err = -ENODEV; else if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) err = -EINVAL; else err = rtc->ops->set_alarm(rtc->dev.parent, alarm); trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err); return err; } int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { ktime_t alarm_time; int err; if (!rtc->ops) return -ENODEV; else if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) return -EINVAL; err = rtc_valid_tm(&alarm->time); if (err != 0) return err; err = rtc_valid_range(rtc, &alarm->time); if (err) return err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (rtc->aie_timer.enabled) rtc_timer_remove(rtc, &rtc->aie_timer); alarm_time = rtc_tm_to_ktime(alarm->time); /* * Round down so we never miss a deadline, checking for past deadline is * done in __rtc_set_alarm */ if (test_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features)) alarm_time = ktime_sub_ns(alarm_time, (u64)alarm->time.tm_sec * NSEC_PER_SEC); rtc->aie_timer.node.expires = alarm_time; rtc->aie_timer.period = 0; if (alarm->enabled) err = rtc_timer_enqueue(rtc, &rtc->aie_timer); mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_set_alarm); /* Called once per device from rtc_device_register */ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { int err; struct rtc_time now; err = rtc_valid_tm(&alarm->time); if (err != 0) return err; err = rtc_read_time(rtc, &now); if (err) return err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); rtc->aie_timer.period = 0; /* Alarm has to be enabled & in the future for us to enqueue it */ if (alarm->enabled && (rtc_tm_to_ktime(now) < rtc->aie_timer.node.expires)) { rtc->aie_timer.enabled = 1; timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); trace_rtc_timer_enqueue(&rtc->aie_timer); } mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_initialize_alarm); int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; if (rtc->aie_timer.enabled != enabled) { if (enabled) err = rtc_timer_enqueue(rtc, &rtc->aie_timer); else rtc_timer_remove(rtc, &rtc->aie_timer); } if (err) /* nothing */; else if (!rtc->ops) err = -ENODEV; else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->alarm_irq_enable) err = -EINVAL; else err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled); mutex_unlock(&rtc->ops_lock); trace_rtc_alarm_irq_enable(enabled, err); return err; } EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable); int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) { int err; err = mutex_lock_interruptible(&rtc->ops_lock); if (err) return err; #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL if (enabled == 0 && rtc->uie_irq_active) { mutex_unlock(&rtc->ops_lock); return rtc_dev_update_irq_enable_emul(rtc, 0); } #endif /* make sure we're changing state */ if (rtc->uie_rtctimer.enabled == enabled) goto out; if (!test_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features) || !test_bit(RTC_FEATURE_ALARM, rtc->features)) { mutex_unlock(&rtc->ops_lock); #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL return rtc_dev_update_irq_enable_emul(rtc, enabled); #else return -EINVAL; #endif } if (enabled) { struct rtc_time tm; ktime_t now, onesec; err = __rtc_read_time(rtc, &tm); if (err) goto out; onesec = ktime_set(1, 0); now = rtc_tm_to_ktime(tm); rtc->uie_rtctimer.node.expires = ktime_add(now, onesec); 
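	/* UIE via rtctimer: first expiry is one second after the current
	 * RTC time; rtc_timer_do_work() re-arms it every second using the
	 * 1s period set below. */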
rtc->uie_rtctimer.period = ktime_set(1, 0); err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer); } else { rtc_timer_remove(rtc, &rtc->uie_rtctimer); } out: mutex_unlock(&rtc->ops_lock); return err; } EXPORT_SYMBOL_GPL(rtc_update_irq_enable); /** * rtc_handle_legacy_irq - AIE, UIE and PIE event hook * @rtc: pointer to the rtc device * @num: number of occurence of the event * @mode: type of the event, RTC_AF, RTC_UF of RTC_PF * * This function is called when an AIE, UIE or PIE mode interrupt * has occurred (or been emulated). * */ void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) { unsigned long flags; /* mark one irq of the appropriate mode */ spin_lock_irqsave(&rtc->irq_lock, flags); rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF | mode); spin_unlock_irqrestore(&rtc->irq_lock, flags); wake_up_interruptible(&rtc->irq_queue); kill_fasync(&rtc->async_queue, SIGIO, POLL_IN); } /** * rtc_aie_update_irq - AIE mode rtctimer hook * @rtc: pointer to the rtc_device * * This functions is called when the aie_timer expires. */ void rtc_aie_update_irq(struct rtc_device *rtc) { rtc_handle_legacy_irq(rtc, 1, RTC_AF); } /** * rtc_uie_update_irq - UIE mode rtctimer hook * @rtc: pointer to the rtc_device * * This functions is called when the uie_timer expires. */ void rtc_uie_update_irq(struct rtc_device *rtc) { rtc_handle_legacy_irq(rtc, 1, RTC_UF); } /** * rtc_pie_update_irq - PIE mode hrtimer hook * @timer: pointer to the pie mode hrtimer * * This function is used to emulate PIE mode interrupts * using an hrtimer. This function is called when the periodic * hrtimer expires. */ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) { struct rtc_device *rtc; ktime_t period; u64 count; rtc = container_of(timer, struct rtc_device, pie_timer); period = NSEC_PER_SEC / rtc->irq_freq; count = hrtimer_forward_now(timer, period); rtc_handle_legacy_irq(rtc, count, RTC_PF); return HRTIMER_RESTART; } /** * rtc_update_irq - Triggered when a RTC interrupt occurs. * @rtc: the rtc device * @num: how many irqs are being reported (usually one) * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF * Context: any */ void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events) { if (IS_ERR_OR_NULL(rtc)) return; pm_stay_awake(rtc->dev.parent); schedule_work(&rtc->irqwork); } EXPORT_SYMBOL_GPL(rtc_update_irq); struct rtc_device *rtc_class_open(const char *name) { struct device *dev; struct rtc_device *rtc = NULL; dev = class_find_device_by_name(&rtc_class, name); if (dev) rtc = to_rtc_device(dev); if (rtc) { if (!try_module_get(rtc->owner)) { put_device(dev); rtc = NULL; } } return rtc; } EXPORT_SYMBOL_GPL(rtc_class_open); void rtc_class_close(struct rtc_device *rtc) { module_put(rtc->owner); put_device(&rtc->dev); } EXPORT_SYMBOL_GPL(rtc_class_close); static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) { /* * We always cancel the timer here first, because otherwise * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); * when we manage to start the timer before the callback * returns HRTIMER_RESTART. * * We cannot use hrtimer_cancel() here as a running callback * could be blocked on rtc->irq_task_lock and hrtimer_cancel() * would spin forever. 
*/ if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0) return -1; if (enabled) { ktime_t period = NSEC_PER_SEC / rtc->irq_freq; hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); } return 0; } /** * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs * @rtc: the rtc device * @enabled: true to enable periodic IRQs * Context: any * * Note that rtc_irq_set_freq() should previously have been used to * specify the desired frequency of periodic IRQ. */ int rtc_irq_set_state(struct rtc_device *rtc, int enabled) { int err = 0; while (rtc_update_hrtimer(rtc, enabled) < 0) cpu_relax(); rtc->pie_enabled = enabled; trace_rtc_irq_set_state(enabled, err); return err; } /** * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ * @rtc: the rtc device * @freq: positive frequency * Context: any * * Note that rtc_irq_set_state() is used to enable or disable the * periodic IRQs. */ int rtc_irq_set_freq(struct rtc_device *rtc, int freq) { int err = 0; if (freq <= 0 || freq > RTC_MAX_FREQ) return -EINVAL; rtc->irq_freq = freq; while (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) cpu_relax(); trace_rtc_irq_set_freq(freq, err); return err; } /** * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue * @rtc: rtc device * @timer: timer being added. * * Enqueues a timer onto the rtc devices timerqueue and sets * the next alarm event appropriately. * * Sets the enabled bit on the added timer. * * Must hold ops_lock for proper serialization of timerqueue */ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) { struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); struct rtc_time tm; ktime_t now; int err; err = __rtc_read_time(rtc, &tm); if (err) return err; timer->enabled = 1; now = rtc_tm_to_ktime(tm); /* Skip over expired timers */ while (next) { if (next->expires >= now) break; next = timerqueue_iterate_next(next); } timerqueue_add(&rtc->timerqueue, &timer->node); trace_rtc_timer_enqueue(timer); if (!next || ktime_before(timer->node.expires, next->expires)) { struct rtc_wkalrm alarm; alarm.time = rtc_ktime_to_tm(timer->node.expires); alarm.enabled = 1; err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) { pm_stay_awake(rtc->dev.parent); schedule_work(&rtc->irqwork); } else if (err) { timerqueue_del(&rtc->timerqueue, &timer->node); trace_rtc_timer_dequeue(timer); timer->enabled = 0; return err; } } return 0; } static void rtc_alarm_disable(struct rtc_device *rtc) { if (!rtc->ops || !test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->alarm_irq_enable) return; rtc->ops->alarm_irq_enable(rtc->dev.parent, false); trace_rtc_alarm_irq_enable(0, 0); } /** * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue * @rtc: rtc device * @timer: timer being removed. * * Removes a timer onto the rtc devices timerqueue and sets * the next alarm event appropriately. * * Clears the enabled bit on the removed timer. 
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);

	timerqueue_del(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_dequeue(timer);
	timer->enabled = 0;
	if (next == &timer->node) {
		struct rtc_wkalrm alarm;
		int err;

		next = timerqueue_getnext(&rtc->timerqueue);
		if (!next) {
			rtc_alarm_disable(rtc);
			return;
		}
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		}
	}
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @work: work item
 *
 * Expires rtc timers. Reprograms next alarm event if needed.
 * Called via the irqwork work item.
 *
 * Serializes access to timerqueue via ops_lock mutex
 */
void rtc_timer_do_work(struct work_struct *work)
{
	struct rtc_timer *timer;
	struct timerqueue_node *next;
	ktime_t now;
	struct rtc_time tm;
	int err;

	struct rtc_device *rtc =
		container_of(work, struct rtc_device, irqwork);

	mutex_lock(&rtc->ops_lock);
again:
	err = __rtc_read_time(rtc, &tm);
	if (err) {
		mutex_unlock(&rtc->ops_lock);
		return;
	}
	now = rtc_tm_to_ktime(tm);
	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
		if (next->expires > now)
			break;

		/* expire timer */
		timer = container_of(next, struct rtc_timer, node);
		timerqueue_del(&rtc->timerqueue, &timer->node);
		trace_rtc_timer_dequeue(timer);
		timer->enabled = 0;
		if (timer->func)
			timer->func(timer->rtc);

		trace_rtc_timer_fired(timer);
		/* Re-add/fwd periodic timers */
		if (ktime_to_ns(timer->period)) {
			timer->node.expires = ktime_add(timer->node.expires,
							timer->period);
			timer->enabled = 1;
			timerqueue_add(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_enqueue(timer);
		}
	}

	/* Set next alarm */
	if (next) {
		struct rtc_wkalrm alarm;
		int err;
		int retry = 3;

		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
reprogram:
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			goto again;
		} else if (err) {
			if (retry-- > 0)
				goto reprogram;

			timer = container_of(next, struct rtc_timer, node);
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
			goto again;
		}
	} else {
		rtc_alarm_disable(rtc);
	}

	pm_relax(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
}

/* rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when timer fires
 * @rtc: pointer to the rtc_device
 *
 * Kernel interface for initializing an rtc_timer.
*/ void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r), struct rtc_device *rtc) { timerqueue_init(&timer->node); timer->enabled = 0; timer->func = f; timer->rtc = rtc; } /* rtc_timer_start - Sets an rtc_timer to fire in the future * @ rtc: rtc device to be used * @ timer: timer being set * @ expires: time at which to expire the timer * @ period: period that the timer will recur * * Kernel interface to set an rtc_timer */ int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period) { int ret = 0; mutex_lock(&rtc->ops_lock); if (timer->enabled) rtc_timer_remove(rtc, timer); timer->node.expires = expires; timer->period = period; ret = rtc_timer_enqueue(rtc, timer); mutex_unlock(&rtc->ops_lock); return ret; } /* rtc_timer_cancel - Stops an rtc_timer * @ rtc: rtc device to be used * @ timer: timer being set * * Kernel interface to cancel an rtc_timer */ void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer) { mutex_lock(&rtc->ops_lock); if (timer->enabled) rtc_timer_remove(rtc, timer); mutex_unlock(&rtc->ops_lock); } /** * rtc_read_offset - Read the amount of rtc offset in parts per billion * @rtc: rtc device to be used * @offset: the offset in parts per billion * * see below for details. * * Kernel interface to read rtc clock offset * Returns 0 on success, or a negative number on error. * If read_offset() is not implemented for the rtc, return -EINVAL */ int rtc_read_offset(struct rtc_device *rtc, long *offset) { int ret; if (!rtc->ops) return -ENODEV; if (!rtc->ops->read_offset) return -EINVAL; mutex_lock(&rtc->ops_lock); ret = rtc->ops->read_offset(rtc->dev.parent, offset); mutex_unlock(&rtc->ops_lock); trace_rtc_read_offset(*offset, ret); return ret; } /** * rtc_set_offset - Adjusts the duration of the average second * @rtc: rtc device to be used * @offset: the offset in parts per billion * * Some rtc's allow an adjustment to the average duration of a second * to compensate for differences in the actual clock rate due to temperature, * the crystal, capacitor, etc. * * The adjustment applied is as follows: * t = t0 * (1 + offset * 1e-9) * where t0 is the measured length of 1 RTC second with offset = 0 * * Kernel interface to adjust an rtc clock offset. * Return 0 on success, or a negative number on error. * If the rtc offset is not setable (or not implemented), return -EINVAL */ int rtc_set_offset(struct rtc_device *rtc, long offset) { int ret; if (!rtc->ops) return -ENODEV; if (!rtc->ops->set_offset) return -EINVAL; mutex_lock(&rtc->ops_lock); ret = rtc->ops->set_offset(rtc->dev.parent, offset); mutex_unlock(&rtc->ops_lock); trace_rtc_set_offset(offset, ret); return ret; }
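The following is a brief illustrative sketch (not taken from the kernel tree) of how a driver might use the rtc_timer interface documented above; the callback and helper names are hypothetical and error handling is trimmed.

/* Hypothetical example: arm a one-shot rtc_timer 60 seconds from "now". */
static void example_rtc_timer_fired(struct rtc_device *rtc)
{
	dev_info(&rtc->dev, "example rtc_timer expired\n");
}

static int example_arm_timer(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct rtc_time tm;
	ktime_t expires;
	int err;

	rtc_timer_init(timer, example_rtc_timer_fired, rtc);

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;

	/* period == 0 makes it one-shot; a non-zero period would re-arm it. */
	expires = ktime_add(rtc_tm_to_ktime(tm), ktime_set(60, 0));
	return rtc_timer_start(rtc, timer, expires, 0);
}

Calling rtc_timer_cancel(rtc, timer) before the expiry would stop the timer again.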
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/userfaultfd_k.h
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * from userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *	fault_pending_wqh.lock
 *	fault_wqh.lock
 *	event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e.
	   not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* released */
	bool released;
	/*
	 * Prevents userfaultfd operations (fill/move/wp) from happening while
	 * some non-cooperative event(s) is taking place. Increments are done
	 * in write-mode, whereas userfaultfd operations, which include
	 * reading mmap_changing, are done under read-mode.
	 */
	struct rw_semaphore map_changing_lock;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;

/* Mutually exclusive modes of operation. */
enum mfill_atomic_mode {
	MFILL_ATOMIC_COPY,
	MFILL_ATOMIC_ZEROPAGE,
	MFILL_ATOMIC_CONTINUE,
	MFILL_ATOMIC_POISON,
	NR_MFILL_ATOMIC_MODES,
};

#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
#define MFILL_ATOMIC_MODE_MASK ((__force uffd_flags_t) (MFILL_ATOMIC_BIT(0) - 1))

static inline bool uffd_flags_mode_is(uffd_flags_t flags, enum mfill_atomic_mode expected)
{
	return (flags & MFILL_ATOMIC_MODE_MASK) == ((__force uffd_flags_t) expected);
}

static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_atomic_mode mode)
{
	flags &= ~MFILL_ATOMIC_MODE_MASK;
	return flags | ((__force uffd_flags_t) mode);
}

/*
 * Flags controlling behavior. These behavior changes are mode-independent.
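 * For example, adding MFILL_ATOMIC_WP to a COPY request asks for the newly
 * installed page to be mapped write-protected.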
*/ #define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0) extern int mfill_atomic_install_pte(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, struct page *page, bool newly_allocated, uffd_flags_t flags); extern ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start, unsigned long src_start, unsigned long len, uffd_flags_t flags); extern ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx, unsigned long dst_start, unsigned long len); extern ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long dst_start, unsigned long len, uffd_flags_t flags); extern ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start, unsigned long len, uffd_flags_t flags); extern int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start, unsigned long len, bool enable_wp); extern long uffd_wp_range(struct vm_area_struct *vma, unsigned long start, unsigned long len, bool enable_wp); /* move_pages */ void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2); void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2); ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start, unsigned long src_start, unsigned long len, __u64 flags); int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long dst_addr, unsigned long src_addr); /* mm helpers */ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, struct vm_userfaultfd_ctx vm_ctx) { return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; } /* * Never enable huge pmd sharing on some uffd registered vmas: * * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry. * * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for * VMAs which share huge pmds. (If you have two mappings to the same * underlying pages, and fault in the non-UFFD-registered one with a write, * with huge pmd sharing this would *also* setup the second UFFD-registered * mapping, and we'd not get minor faults.) */ static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma) { return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); } /* * Don't do fault around for either WP or MINOR registered uffd range. For * MINOR registered range, fault around will be a total disaster and ptes can * be installed without notifications; for WP it should mostly be fine as long * as the fault around checks for pte_none() before the installation, however * to be super safe we just forbid it. 
*/ static inline bool uffd_disable_fault_around(struct vm_area_struct *vma) { return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); } static inline bool userfaultfd_missing(struct vm_area_struct *vma) { return vma->vm_flags & VM_UFFD_MISSING; } static inline bool userfaultfd_wp(struct vm_area_struct *vma) { return vma->vm_flags & VM_UFFD_WP; } static inline bool userfaultfd_minor(struct vm_area_struct *vma) { return vma->vm_flags & VM_UFFD_MINOR; } static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, pte_t pte) { return userfaultfd_wp(vma) && pte_uffd_wp(pte); } static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, pmd_t pmd) { return userfaultfd_wp(vma) && pmd_uffd_wp(pmd); } static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return vma->vm_flags & __VM_UFFD_FLAGS; } static inline bool vma_can_userfault(struct vm_area_struct *vma, unsigned long vm_flags, bool wp_async) { vm_flags &= __VM_UFFD_FLAGS; if (vm_flags & VM_DROPPABLE) return false; if ((vm_flags & VM_UFFD_MINOR) && (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))) return false; /* * If wp async enabled, and WP is the only mode enabled, allow any * memory type. */ if (wp_async && (vm_flags == VM_UFFD_WP)) return true; #ifndef CONFIG_PTE_MARKER_UFFD_WP /* * If user requested uffd-wp but not enabled pte markers for * uffd-wp, then shmem & hugetlbfs are not supported but only * anonymous. */ if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma)) return false; #endif /* By default, allow any of anon|shmem|hugetlb */ return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || vma_is_shmem(vma); } static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma) { struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx; return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0; } extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *); extern void dup_userfaultfd_complete(struct list_head *); void dup_userfaultfd_fail(struct list_head *); extern void mremap_userfaultfd_prep(struct vm_area_struct *, struct vm_userfaultfd_ctx *); extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, unsigned long from, unsigned long to, unsigned long len); extern bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf); extern void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf); extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma); extern bool userfaultfd_wp_async(struct vm_area_struct *vma); void userfaultfd_reset_ctx(struct vm_area_struct *vma); struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end); int userfaultfd_register_range(struct userfaultfd_ctx *ctx, struct vm_area_struct *vma, unsigned long vm_flags, unsigned long start, unsigned long end, bool wp_async); void userfaultfd_release_new(struct userfaultfd_ctx *ctx); void userfaultfd_release_all(struct mm_struct *mm, struct userfaultfd_ctx *ctx); #else /* CONFIG_USERFAULTFD */ /* mm helpers */ static inline vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) { return VM_FAULT_SIGBUS; } static inline long uffd_wp_range(struct vm_area_struct *vma, unsigned long start, unsigned long len, bool enable_wp) { return false; } static inline bool 
is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, struct vm_userfaultfd_ctx vm_ctx) { return true; } static inline bool userfaultfd_missing(struct vm_area_struct *vma) { return false; } static inline bool userfaultfd_wp(struct vm_area_struct *vma) { return false; } static inline bool userfaultfd_minor(struct vm_area_struct *vma) { return false; } static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma, pte_t pte) { return false; } static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, pmd_t pmd) { return false; } static inline bool userfaultfd_armed(struct vm_area_struct *vma) { return false; } static inline int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *l) { return 0; } static inline void dup_userfaultfd_complete(struct list_head *l) { } static inline void dup_userfaultfd_fail(struct list_head *l) { } static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma, struct vm_userfaultfd_ctx *ctx) { } static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, unsigned long from, unsigned long to, unsigned long len) { } static inline bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end) { return true; } static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf) { return 0; } static inline void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf) { } static inline bool uffd_disable_fault_around(struct vm_area_struct *vma) { return false; } static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma) { return false; } static inline bool userfaultfd_wp_async(struct vm_area_struct *vma) { return false; } static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma) { return false; } #endif /* CONFIG_USERFAULTFD */ static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma) { /* Only wr-protect mode uses pte markers */ if (!userfaultfd_wp(vma)) return false; /* File-based uffd-wp always need markers */ if (!vma_is_anonymous(vma)) return true; /* * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED * enabled (to apply markers on zero pages). */ return userfaultfd_wp_unpopulated(vma); } static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry) { #ifdef CONFIG_PTE_MARKER_UFFD_WP return is_pte_marker_entry(entry) && (pte_marker_get(entry) & PTE_MARKER_UFFD_WP); #else return false; #endif } static inline bool pte_marker_uffd_wp(pte_t pte) { #ifdef CONFIG_PTE_MARKER_UFFD_WP swp_entry_t entry; if (!is_swap_pte(pte)) return false; entry = pte_to_swp_entry(pte); return pte_marker_entry_uffd_wp(entry); #else return false; #endif } /* * Returns true if this is a swap pte and was uffd-wp wr-protected in either * forms (pte marker or a normal swap pte), false otherwise. */ static inline bool pte_swp_uffd_wp_any(pte_t pte) { #ifdef CONFIG_PTE_MARKER_UFFD_WP if (!is_swap_pte(pte)) return false; if (pte_swp_uffd_wp(pte)) return true; if (pte_marker_uffd_wp(pte)) return true; #endif return false; } #endif /* _LINUX_USERFAULTFD_K_H */
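The uffd_flags_t helpers declared above pack one mfill_atomic_mode value into the low bits and keep behavior flags in the bits above them. The following minimal sketch (not part of the kernel header; the function name is hypothetical) shows how a caller composes and tests such a value.

/* Illustrative only: compose a COPY request that also asks for uffd-wp. */
static inline uffd_flags_t uffd_flags_example(void)
{
	uffd_flags_t flags = 0;

	/* Select the mutually exclusive mode first... */
	flags = uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY);

	/* ...then OR in any mode-independent behavior flags. */
	flags |= MFILL_ATOMIC_WP;

	/* The mode bits still identify COPY despite the extra flag. */
	WARN_ON(!uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY));

	return flags;
}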
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RSA Signature Scheme with Appendix - PKCS #1 v1.5 (RFC 8017 sec 8.2)
 *
 * https://www.rfc-editor.org/rfc/rfc8017#section-8.2
 *
 * Copyright (c) 2015 - 2024 Intel Corporation
 */

#include <linux/module.h>
#include <linux/scatterlist.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sig.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/sig.h>

/*
 * Full Hash Prefix for EMSA-PKCS1-v1_5 encoding method (RFC 9580 table 24)
 *
 * RSA keys are usual