Total coverage: 445123 of 1996051 (23%)
8 8 1 8 8 8 8 8 8 8 8 8 8 8 3 2 4 4 4 3 5 5 3 3 3 4 4 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 // SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/sch_skbprio.c SKB Priority Queue. * * Authors: Nishanth Devarajan, <ndev2021@gmail.com> * Cody Doucette, <doucette@bu.edu> * original idea by Michel Machado, Cody Doucette, and Qiaobin Fu */ #include <linux/string.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> #include <net/sch_generic.h> #include <net/inet_ecn.h> /* SKB Priority Queue * ================================= * * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes * packets according to their skb->priority field. Under congestion, * Skbprio drops already-enqueued lower priority packets to make space * available for higher priority packets; it was conceived as a solution * for denial-of-service defenses that need to route packets with different * priorities as a mean to overcome DoS attacks. */ struct skbprio_sched_data { /* Queue state. */ struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY]; struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY]; u16 highest_prio; u16 lowest_prio; }; static u16 calc_new_high_prio(const struct skbprio_sched_data *q) { int prio; for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { if (!skb_queue_empty(&q->qdiscs[prio])) return prio; } /* SKB queue is empty, return 0 (default highest priority setting). */ return 0; } static u16 calc_new_low_prio(const struct skbprio_sched_data *q) { int prio; for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) { if (!skb_queue_empty(&q->qdiscs[prio])) return prio; } /* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1 * (default lowest priority setting). */ return SKBPRIO_MAX_PRIORITY - 1; } static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1; struct skbprio_sched_data *q = qdisc_priv(sch); struct sk_buff_head *qdisc; struct sk_buff_head *lp_qdisc; struct sk_buff *to_drop; u16 prio, lp; /* Obtain the priority of @skb. 
*/ prio = min(skb->priority, max_priority); qdisc = &q->qdiscs[prio]; /* sch->limit can change under us from skbprio_change() */ if (sch->q.qlen < READ_ONCE(sch->limit)) { __skb_queue_tail(qdisc, skb); qdisc_qstats_backlog_inc(sch, skb); q->qstats[prio].backlog += qdisc_pkt_len(skb); /* Check to update highest and lowest priorities. */ if (prio > q->highest_prio) q->highest_prio = prio; if (prio < q->lowest_prio) q->lowest_prio = prio; sch->q.qlen++; return NET_XMIT_SUCCESS; } /* If this packet has the lowest priority, drop it. */ lp = q->lowest_prio; if (prio <= lp) { q->qstats[prio].drops++; q->qstats[prio].overlimits++; return qdisc_drop(skb, sch, to_free); } __skb_queue_tail(qdisc, skb); qdisc_qstats_backlog_inc(sch, skb); q->qstats[prio].backlog += qdisc_pkt_len(skb); /* Drop the packet at the tail of the lowest priority qdisc. */ lp_qdisc = &q->qdiscs[lp]; to_drop = __skb_dequeue_tail(lp_qdisc); BUG_ON(!to_drop); qdisc_qstats_backlog_dec(sch, to_drop); qdisc_drop(to_drop, sch, to_free); q->qstats[lp].backlog -= qdisc_pkt_len(to_drop); q->qstats[lp].drops++; q->qstats[lp].overlimits++; /* Check to update highest and lowest priorities. */ if (skb_queue_empty(lp_qdisc)) { if (q->lowest_prio == q->highest_prio) { /* The incoming packet is the only packet in queue. */ BUG_ON(sch->q.qlen != 1); q->lowest_prio = prio; q->highest_prio = prio; } else { q->lowest_prio = calc_new_low_prio(q); } } if (prio > q->highest_prio) q->highest_prio = prio; return NET_XMIT_CN; } static struct sk_buff *skbprio_dequeue(struct Qdisc *sch) { struct skbprio_sched_data *q = qdisc_priv(sch); struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio]; struct sk_buff *skb = __skb_dequeue(hpq); if (unlikely(!skb)) return NULL; sch->q.qlen--; qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb); /* Update highest priority field. */ if (skb_queue_empty(hpq)) { if (q->lowest_prio == q->highest_prio) { BUG_ON(sch->q.qlen); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; } else { q->highest_prio = calc_new_high_prio(q); } } return skb; } static int skbprio_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct tc_skbprio_qopt *ctl = nla_data(opt); if (opt->nla_len != nla_attr_size(sizeof(*ctl))) return -EINVAL; WRITE_ONCE(sch->limit, ctl->limit); return 0; } static int skbprio_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct skbprio_sched_data *q = qdisc_priv(sch); int prio; /* Initialise all queues, one for each possible priority. 
*/ for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) __skb_queue_head_init(&q->qdiscs[prio]); memset(&q->qstats, 0, sizeof(q->qstats)); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; sch->limit = 64; if (!opt) return 0; return skbprio_change(sch, opt, extack); } static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tc_skbprio_qopt opt; opt.limit = READ_ONCE(sch->limit); if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt)) return -1; return skb->len; } static void skbprio_reset(struct Qdisc *sch) { struct skbprio_sched_data *q = qdisc_priv(sch); int prio; for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) __skb_queue_purge(&q->qdiscs[prio]); memset(&q->qstats, 0, sizeof(q->qstats)); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; } static void skbprio_destroy(struct Qdisc *sch) { struct skbprio_sched_data *q = qdisc_priv(sch); int prio; for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++) __skb_queue_purge(&q->qdiscs[prio]); } static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long skbprio_find(struct Qdisc *sch, u32 classid) { return 0; } static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { tcm->tcm_handle |= TC_H_MIN(cl); return 0; } static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, struct gnet_dump *d) { struct skbprio_sched_data *q = qdisc_priv(sch); if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1], q->qstats[cl - 1].qlen) < 0) return -1; return 0; } static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) { unsigned int i; if (arg->stop) return; for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) { if (!tc_qdisc_stats_dump(sch, i + 1, arg)) break; } } static const struct Qdisc_class_ops skbprio_class_ops = { .leaf = skbprio_leaf, .find = skbprio_find, .dump = skbprio_dump_class, .dump_stats = skbprio_dump_class_stats, .walk = skbprio_walk, }; static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = { .cl_ops = &skbprio_class_ops, .id = "skbprio", .priv_size = sizeof(struct skbprio_sched_data), .enqueue = skbprio_enqueue, .dequeue = skbprio_dequeue, .peek = qdisc_peek_dequeued, .init = skbprio_init, .reset = skbprio_reset, .change = skbprio_change, .dump = skbprio_dump, .destroy = skbprio_destroy, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("skbprio"); static int __init skbprio_module_init(void) { return register_qdisc(&skbprio_qdisc_ops); } static void __exit skbprio_module_exit(void) { unregister_qdisc(&skbprio_qdisc_ops); } module_init(skbprio_module_init) module_exit(skbprio_module_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SKB priority based scheduling qdisc");
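The enqueue path above clamps skb->priority into the supported range and, once the qdisc is full, either drops the incoming packet (if its priority is not higher than the lowest band currently queued) or displaces a packet from the tail of that lowest band. Below is a minimal userspace sketch of that decision, tracking only per-priority packet counts; struct skbprio_model and model_enqueue() are illustrative names, not kernel API, and the model assumes 64 priority bands like SKBPRIO_MAX_PRIORITY.

/*
 * Hypothetical userspace model of the skbprio enqueue decision.
 * Only per-priority counts are tracked; no real skbs or stats.
 */
#include <stdio.h>

#define MODEL_MAX_PRIORITY 64	/* mirrors SKBPRIO_MAX_PRIORITY */

struct skbprio_model {
	unsigned int qlen[MODEL_MAX_PRIORITY];
	unsigned int total;
	unsigned int limit;
	unsigned int lowest_prio;	/* lowest priority currently queued */
};

/* Returns 1 if the new packet was queued, 0 if it was dropped. */
static int model_enqueue(struct skbprio_model *q, unsigned int skb_priority)
{
	unsigned int prio = skb_priority < MODEL_MAX_PRIORITY - 1 ?
			    skb_priority : MODEL_MAX_PRIORITY - 1;

	if (q->total < q->limit) {		/* room left: just queue it */
		q->qlen[prio]++;
		q->total++;
		if (q->total == 1 || prio < q->lowest_prio)
			q->lowest_prio = prio;
		return 1;
	}
	if (prio <= q->lowest_prio)		/* congested and not better than
						 * anything already queued */
		return 0;
	q->qlen[q->lowest_prio]--;		/* evict one lowest-priority packet */
	q->qlen[prio]++;
	while (q->lowest_prio < MODEL_MAX_PRIORITY - 1 && !q->qlen[q->lowest_prio])
		q->lowest_prio++;		/* recompute lowest occupied band */
	return 1;
}

int main(void)
{
	struct skbprio_model q = { .limit = 2, .lowest_prio = MODEL_MAX_PRIORITY - 1 };

	printf("enqueue prio 5: %d\n", model_enqueue(&q, 5));	/* queued */
	printf("enqueue prio 1: %d\n", model_enqueue(&q, 1));	/* queued */
	printf("enqueue prio 7: %d\n", model_enqueue(&q, 7));	/* full: evicts the prio-1 packet */
	printf("lowest occupied prio now %u\n", q.lowest_prio);	/* prints 5 */
	return 0;
}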
4 3 2 2 1 1 1 5 4 3 3 2 2 1 5 4 3 2 2 1 16 16 16 14 14 11 3 11 4 5 14 1 2 2 2 2 2 2 2 1 1 1 6 6 4 3 3 3 2 1 1 6 6 25 24 24 25 23 1 22 6 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net> * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_offload.h> struct nft_bitwise { u8 sreg; u8 sreg2; u8 dreg; enum nft_bitwise_ops op:8; u8 len; struct nft_data mask; struct nft_data xor; struct nft_data data; }; static void nft_bitwise_eval_mask_xor(u32 *dst, const u32 *src, const struct nft_bitwise *priv) { unsigned int i; for (i = 0; i < DIV_ROUND_UP(priv->len, sizeof(u32)); i++) dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i]; } static void nft_bitwise_eval_lshift(u32 *dst, const u32 *src, const 
struct nft_bitwise *priv) { u32 shift = priv->data.data[0]; unsigned int i; u32 carry = 0; for (i = DIV_ROUND_UP(priv->len, sizeof(u32)); i > 0; i--) { dst[i - 1] = (src[i - 1] << shift) | carry; carry = src[i - 1] >> (BITS_PER_TYPE(u32) - shift); } } static void nft_bitwise_eval_rshift(u32 *dst, const u32 *src, const struct nft_bitwise *priv) { u32 shift = priv->data.data[0]; unsigned int i; u32 carry = 0; for (i = 0; i < DIV_ROUND_UP(priv->len, sizeof(u32)); i++) { dst[i] = carry | (src[i] >> shift); carry = src[i] << (BITS_PER_TYPE(u32) - shift); } } static void nft_bitwise_eval_and(u32 *dst, const u32 *src, const u32 *src2, const struct nft_bitwise *priv) { unsigned int i, n; for (i = 0, n = DIV_ROUND_UP(priv->len, sizeof(u32)); i < n; i++) dst[i] = src[i] & src2[i]; } static void nft_bitwise_eval_or(u32 *dst, const u32 *src, const u32 *src2, const struct nft_bitwise *priv) { unsigned int i, n; for (i = 0, n = DIV_ROUND_UP(priv->len, sizeof(u32)); i < n; i++) dst[i] = src[i] | src2[i]; } static void nft_bitwise_eval_xor(u32 *dst, const u32 *src, const u32 *src2, const struct nft_bitwise *priv) { unsigned int i, n; for (i = 0, n = DIV_ROUND_UP(priv->len, sizeof(u32)); i < n; i++) dst[i] = src[i] ^ src2[i]; } void nft_bitwise_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_bitwise *priv = nft_expr_priv(expr); const u32 *src = &regs->data[priv->sreg], *src2; u32 *dst = &regs->data[priv->dreg]; if (priv->op == NFT_BITWISE_MASK_XOR) { nft_bitwise_eval_mask_xor(dst, src, priv); return; } if (priv->op == NFT_BITWISE_LSHIFT) { nft_bitwise_eval_lshift(dst, src, priv); return; } if (priv->op == NFT_BITWISE_RSHIFT) { nft_bitwise_eval_rshift(dst, src, priv); return; } src2 = priv->sreg2 ? &regs->data[priv->sreg2] : priv->data.data; if (priv->op == NFT_BITWISE_AND) { nft_bitwise_eval_and(dst, src, src2, priv); return; } if (priv->op == NFT_BITWISE_OR) { nft_bitwise_eval_or(dst, src, src2, priv); return; } if (priv->op == NFT_BITWISE_XOR) { nft_bitwise_eval_xor(dst, src, src2, priv); return; } } static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = { [NFTA_BITWISE_SREG] = { .type = NLA_U32 }, [NFTA_BITWISE_SREG2] = { .type = NLA_U32 }, [NFTA_BITWISE_DREG] = { .type = NLA_U32 }, [NFTA_BITWISE_LEN] = { .type = NLA_U32 }, [NFTA_BITWISE_MASK] = { .type = NLA_NESTED }, [NFTA_BITWISE_XOR] = { .type = NLA_NESTED }, [NFTA_BITWISE_OP] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_BITWISE_DATA] = { .type = NLA_NESTED }, }; static int nft_bitwise_init_mask_xor(struct nft_bitwise *priv, const struct nlattr *const tb[]) { struct nft_data_desc mask = { .type = NFT_DATA_VALUE, .size = sizeof(priv->mask), .len = priv->len, }; struct nft_data_desc xor = { .type = NFT_DATA_VALUE, .size = sizeof(priv->xor), .len = priv->len, }; int err; if (tb[NFTA_BITWISE_DATA] || tb[NFTA_BITWISE_SREG2]) return -EINVAL; if (!tb[NFTA_BITWISE_MASK] || !tb[NFTA_BITWISE_XOR]) return -EINVAL; err = nft_data_init(NULL, &priv->mask, &mask, tb[NFTA_BITWISE_MASK]); if (err < 0) return err; err = nft_data_init(NULL, &priv->xor, &xor, tb[NFTA_BITWISE_XOR]); if (err < 0) goto err_xor_err; return 0; err_xor_err: nft_data_release(&priv->mask, mask.type); return err; } static int nft_bitwise_init_shift(struct nft_bitwise *priv, const struct nlattr *const tb[]) { struct nft_data_desc desc = { .type = NFT_DATA_VALUE, .size = sizeof(priv->data), .len = sizeof(u32), }; int err; if (tb[NFTA_BITWISE_MASK] || tb[NFTA_BITWISE_XOR] || tb[NFTA_BITWISE_SREG2]) return -EINVAL; if 
(!tb[NFTA_BITWISE_DATA]) return -EINVAL; err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_BITWISE_DATA]); if (err < 0) return err; if (priv->data.data[0] >= BITS_PER_TYPE(u32)) { nft_data_release(&priv->data, desc.type); return -EINVAL; } return 0; } static int nft_bitwise_init_bool(const struct nft_ctx *ctx, struct nft_bitwise *priv, const struct nlattr *const tb[]) { int err; if (tb[NFTA_BITWISE_MASK] || tb[NFTA_BITWISE_XOR]) return -EINVAL; if ((!tb[NFTA_BITWISE_DATA] && !tb[NFTA_BITWISE_SREG2]) || (tb[NFTA_BITWISE_DATA] && tb[NFTA_BITWISE_SREG2])) return -EINVAL; if (tb[NFTA_BITWISE_DATA]) { struct nft_data_desc desc = { .type = NFT_DATA_VALUE, .size = sizeof(priv->data), .len = priv->len, }; err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_BITWISE_DATA]); if (err < 0) return err; } else { err = nft_parse_register_load(ctx, tb[NFTA_BITWISE_SREG2], &priv->sreg2, priv->len); if (err < 0) return err; } return 0; } static int nft_bitwise_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_bitwise *priv = nft_expr_priv(expr); u32 len; int err; err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len); if (err < 0) return err; priv->len = len; err = nft_parse_register_load(ctx, tb[NFTA_BITWISE_SREG], &priv->sreg, priv->len); if (err < 0) return err; err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, priv->len); if (err < 0) return err; if (tb[NFTA_BITWISE_OP]) { priv->op = ntohl(nla_get_be32(tb[NFTA_BITWISE_OP])); switch (priv->op) { case NFT_BITWISE_MASK_XOR: case NFT_BITWISE_LSHIFT: case NFT_BITWISE_RSHIFT: case NFT_BITWISE_AND: case NFT_BITWISE_OR: case NFT_BITWISE_XOR: break; default: return -EOPNOTSUPP; } } else { priv->op = NFT_BITWISE_MASK_XOR; } switch(priv->op) { case NFT_BITWISE_MASK_XOR: err = nft_bitwise_init_mask_xor(priv, tb); break; case NFT_BITWISE_LSHIFT: case NFT_BITWISE_RSHIFT: err = nft_bitwise_init_shift(priv, tb); break; case NFT_BITWISE_AND: case NFT_BITWISE_OR: case NFT_BITWISE_XOR: err = nft_bitwise_init_bool(ctx, priv, tb); break; } return err; } static int nft_bitwise_dump_mask_xor(struct sk_buff *skb, const struct nft_bitwise *priv) { if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask, NFT_DATA_VALUE, priv->len) < 0) return -1; if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor, NFT_DATA_VALUE, priv->len) < 0) return -1; return 0; } static int nft_bitwise_dump_shift(struct sk_buff *skb, const struct nft_bitwise *priv) { if (nft_data_dump(skb, NFTA_BITWISE_DATA, &priv->data, NFT_DATA_VALUE, sizeof(u32)) < 0) return -1; return 0; } static int nft_bitwise_dump_bool(struct sk_buff *skb, const struct nft_bitwise *priv) { if (priv->sreg2) { if (nft_dump_register(skb, NFTA_BITWISE_SREG2, priv->sreg2)) return -1; } else { if (nft_data_dump(skb, NFTA_BITWISE_DATA, &priv->data, NFT_DATA_VALUE, sizeof(u32)) < 0) return -1; } return 0; } static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_bitwise *priv = nft_expr_priv(expr); int err = 0; if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg)) return -1; if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg)) return -1; if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len))) return -1; if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(priv->op))) return -1; switch (priv->op) { case NFT_BITWISE_MASK_XOR: err = nft_bitwise_dump_mask_xor(skb, priv); break; case NFT_BITWISE_LSHIFT: case NFT_BITWISE_RSHIFT: err = nft_bitwise_dump_shift(skb, priv); break; 
case NFT_BITWISE_AND: case NFT_BITWISE_OR: case NFT_BITWISE_XOR: err = nft_bitwise_dump_bool(skb, priv); break; } return err; } static struct nft_data zero; static int nft_bitwise_offload(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr) { const struct nft_bitwise *priv = nft_expr_priv(expr); struct nft_offload_reg *reg = &ctx->regs[priv->dreg]; if (priv->op != NFT_BITWISE_MASK_XOR) return -EOPNOTSUPP; if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) || priv->sreg != priv->dreg || priv->len != reg->len) return -EOPNOTSUPP; memcpy(&reg->mask, &priv->mask, sizeof(priv->mask)); return 0; } static bool nft_bitwise_reduce(struct nft_regs_track *track, const struct nft_expr *expr) { const struct nft_bitwise *priv = nft_expr_priv(expr); const struct nft_bitwise *bitwise; unsigned int regcount; u8 dreg; int i; if (!track->regs[priv->sreg].selector) return false; bitwise = nft_expr_priv(track->regs[priv->dreg].selector); if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector && track->regs[priv->sreg].num_reg == 0 && track->regs[priv->dreg].bitwise && track->regs[priv->dreg].bitwise->ops == expr->ops && priv->sreg == bitwise->sreg && priv->sreg2 == bitwise->sreg2 && priv->dreg == bitwise->dreg && priv->op == bitwise->op && priv->len == bitwise->len && !memcmp(&priv->mask, &bitwise->mask, sizeof(priv->mask)) && !memcmp(&priv->xor, &bitwise->xor, sizeof(priv->xor)) && !memcmp(&priv->data, &bitwise->data, sizeof(priv->data))) { track->cur = expr; return true; } if (track->regs[priv->sreg].bitwise || track->regs[priv->sreg].num_reg != 0) { nft_reg_track_cancel(track, priv->dreg, priv->len); return false; } if (priv->sreg != priv->dreg) { nft_reg_track_update(track, track->regs[priv->sreg].selector, priv->dreg, priv->len); } dreg = priv->dreg; regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE); for (i = 0; i < regcount; i++, dreg++) track->regs[dreg].bitwise = expr; return false; } static const struct nft_expr_ops nft_bitwise_ops = { .type = &nft_bitwise_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_bitwise)), .eval = nft_bitwise_eval, .init = nft_bitwise_init, .dump = nft_bitwise_dump, .reduce = nft_bitwise_reduce, .offload = nft_bitwise_offload, }; static int nft_bitwise_extract_u32_data(const struct nlattr * const tb, u32 *out) { struct nft_data data; struct nft_data_desc desc = { .type = NFT_DATA_VALUE, .size = sizeof(data), .len = sizeof(u32), }; int err; err = nft_data_init(NULL, &data, &desc, tb); if (err < 0) return err; *out = data.data[0]; return 0; } static int nft_bitwise_fast_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr); int err; err = nft_parse_register_load(ctx, tb[NFTA_BITWISE_SREG], &priv->sreg, sizeof(u32)); if (err < 0) return err; err = nft_parse_register_store(ctx, tb[NFTA_BITWISE_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, sizeof(u32)); if (err < 0) return err; if (tb[NFTA_BITWISE_DATA] || tb[NFTA_BITWISE_SREG2]) return -EINVAL; if (!tb[NFTA_BITWISE_MASK] || !tb[NFTA_BITWISE_XOR]) return -EINVAL; err = nft_bitwise_extract_u32_data(tb[NFTA_BITWISE_MASK], &priv->mask); if (err < 0) return err; err = nft_bitwise_extract_u32_data(tb[NFTA_BITWISE_XOR], &priv->xor); if (err < 0) return err; return 0; } static int nft_bitwise_fast_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr); struct nft_data data; if (nft_dump_register(skb, NFTA_BITWISE_SREG, 
priv->sreg)) return -1; if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg)) return -1; if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(sizeof(u32)))) return -1; if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(NFT_BITWISE_MASK_XOR))) return -1; data.data[0] = priv->mask; if (nft_data_dump(skb, NFTA_BITWISE_MASK, &data, NFT_DATA_VALUE, sizeof(u32)) < 0) return -1; data.data[0] = priv->xor; if (nft_data_dump(skb, NFTA_BITWISE_XOR, &data, NFT_DATA_VALUE, sizeof(u32)) < 0) return -1; return 0; } static int nft_bitwise_fast_offload(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr) { const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr); struct nft_offload_reg *reg = &ctx->regs[priv->dreg]; if (priv->xor || priv->sreg != priv->dreg || reg->len != sizeof(u32)) return -EOPNOTSUPP; reg->mask.data[0] = priv->mask; return 0; } static bool nft_bitwise_fast_reduce(struct nft_regs_track *track, const struct nft_expr *expr) { const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr); const struct nft_bitwise_fast_expr *bitwise; if (!track->regs[priv->sreg].selector) return false; bitwise = nft_expr_priv(track->regs[priv->dreg].selector); if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector && track->regs[priv->dreg].bitwise && track->regs[priv->dreg].bitwise->ops == expr->ops && priv->sreg == bitwise->sreg && priv->dreg == bitwise->dreg && priv->mask == bitwise->mask && priv->xor == bitwise->xor) { track->cur = expr; return true; } if (track->regs[priv->sreg].bitwise) { nft_reg_track_cancel(track, priv->dreg, NFT_REG32_SIZE); return false; } if (priv->sreg != priv->dreg) { track->regs[priv->dreg].selector = track->regs[priv->sreg].selector; } track->regs[priv->dreg].bitwise = expr; return false; } const struct nft_expr_ops nft_bitwise_fast_ops = { .type = &nft_bitwise_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_bitwise_fast_expr)), .eval = NULL, /* inlined */ .init = nft_bitwise_fast_init, .dump = nft_bitwise_fast_dump, .reduce = nft_bitwise_fast_reduce, .offload = nft_bitwise_fast_offload, }; static const struct nft_expr_ops * nft_bitwise_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { int err; u32 len; if (!tb[NFTA_BITWISE_LEN] || !tb[NFTA_BITWISE_SREG] || !tb[NFTA_BITWISE_DREG]) return ERR_PTR(-EINVAL); err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len); if (err < 0) return ERR_PTR(err); if (len != sizeof(u32)) return &nft_bitwise_ops; if (tb[NFTA_BITWISE_OP] && ntohl(nla_get_be32(tb[NFTA_BITWISE_OP])) != NFT_BITWISE_MASK_XOR) return &nft_bitwise_ops; return &nft_bitwise_fast_ops; } struct nft_expr_type nft_bitwise_type __read_mostly = { .name = "bitwise", .select_ops = nft_bitwise_select_ops, .policy = nft_bitwise_policy, .maxattr = NFTA_BITWISE_MAX, .owner = THIS_MODULE, }; bool nft_expr_reduce_bitwise(struct nft_regs_track *track, const struct nft_expr *expr) { const struct nft_expr *last = track->last; const struct nft_expr *next; if (expr == last) return false; next = nft_expr_next(expr); if (next->ops == &nft_bitwise_ops) return nft_bitwise_reduce(track, next); else if (next->ops == &nft_bitwise_fast_ops) return nft_bitwise_fast_reduce(track, next); return false; } EXPORT_SYMBOL_GPL(nft_expr_reduce_bitwise);
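nft_bitwise_eval_lshift() above shifts a value spread over several 32-bit registers, propagating the bits shifted out of one word into the previous word; the carry direction implies the words are treated most-significant first. The standalone sketch below mirrors that loop; multiword_lshift() is an illustrative name, not a kernel symbol, and like the kernel helper it assumes a shift in the range 1..31.

/* Standalone sketch of the word-wise left shift with carry. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static void multiword_lshift(uint32_t *dst, const uint32_t *src,
			     unsigned int nwords, unsigned int shift)
{
	uint32_t carry = 0;
	unsigned int i;

	/* shift must be 1..31 so that (32 - shift) is a valid shift count */
	for (i = nwords; i > 0; i--) {
		dst[i - 1] = (src[i - 1] << shift) | carry;
		carry = src[i - 1] >> (32 - shift);	/* bits for the next (more significant) word */
	}
}

int main(void)
{
	/* 0x00000001_80000000 shifted left by 1 -> 0x00000003_00000000 */
	uint32_t src[2] = { 0x00000001, 0x80000000 };
	uint32_t dst[2];

	multiword_lshift(dst, src, 2, 1);
	printf("%08" PRIx32 " %08" PRIx32 "\n", dst[0], dst[1]);	/* 00000003 00000000 */
	return 0;
}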
1 1 1 2 2 1 1 2 1 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 // SPDX-License-Identifier: GPL-2.0-or-later /* * MSI GT683R led driver * * Copyright (c) 2014 Janne Kanniainen <janne.kanniainen@gmail.com> */ #include <linux/device.h> #include <linux/hid.h> #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> #include "hid-ids.h" #define GT683R_BUFFER_SIZE 8 /* * GT683R_LED_OFF: all LEDs are off * GT683R_LED_AUDIO: LEDs brightness depends on sound level * GT683R_LED_BREATHING: LEDs brightness varies at human breathing rate * GT683R_LED_NORMAL: LEDs are fully on when enabled */ enum gt683r_led_mode { GT683R_LED_OFF = 0, GT683R_LED_AUDIO = 2, GT683R_LED_BREATHING = 3, GT683R_LED_NORMAL = 5 }; enum gt683r_panels { GT683R_LED_BACK = 0, GT683R_LED_SIDE = 1, GT683R_LED_FRONT = 2, GT683R_LED_COUNT, }; static const char * const gt683r_panel_names[] = { "back", "side", "front", }; struct gt683r_led { struct hid_device *hdev; struct led_classdev led_devs[GT683R_LED_COUNT]; struct mutex lock; struct work_struct work; enum led_brightness brightnesses[GT683R_LED_COUNT]; enum gt683r_led_mode mode; }; static const struct hid_device_id gt683r_led_id[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, { } }; MODULE_DEVICE_TABLE(hid, gt683r_led_id); static void gt683r_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { int i; struct device *dev = led_cdev->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct gt683r_led *led = hid_get_drvdata(hdev); for (i = 0; i < GT683R_LED_COUNT; i++) { if (led_cdev == &led->led_devs[i]) break; } if (i < GT683R_LED_COUNT) { led->brightnesses[i] = brightness; schedule_work(&led->work); } } static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { u8 sysfs_mode; struct hid_device *hdev = to_hid_device(dev->parent); struct gt683r_led *led = hid_get_drvdata(hdev); if (led->mode == GT683R_LED_NORMAL) sysfs_mode = 0; else if (led->mode == GT683R_LED_AUDIO) sysfs_mode = 1; else sysfs_mode = 2; return scnprintf(buf, PAGE_SIZE, "%u\n", sysfs_mode); } static ssize_t mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u8 sysfs_mode; struct hid_device *hdev = to_hid_device(dev->parent); struct gt683r_led *led = hid_get_drvdata(hdev); if (kstrtou8(buf, 10, &sysfs_mode) || sysfs_mode > 2) return -EINVAL; mutex_lock(&led->lock); if (sysfs_mode == 0) 
led->mode = GT683R_LED_NORMAL; else if (sysfs_mode == 1) led->mode = GT683R_LED_AUDIO; else led->mode = GT683R_LED_BREATHING; mutex_unlock(&led->lock); schedule_work(&led->work); return count; } static int gt683r_led_snd_msg(struct gt683r_led *led, u8 *msg) { int ret; ret = hid_hw_raw_request(led->hdev, msg[0], msg, GT683R_BUFFER_SIZE, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret != GT683R_BUFFER_SIZE) { hid_err(led->hdev, "failed to send set report request: %i\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static int gt683r_leds_set(struct gt683r_led *led, u8 leds) { int ret; u8 *buffer; buffer = kzalloc(GT683R_BUFFER_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; buffer[0] = 0x01; buffer[1] = 0x02; buffer[2] = 0x30; buffer[3] = leds; ret = gt683r_led_snd_msg(led, buffer); kfree(buffer); return ret; } static int gt683r_mode_set(struct gt683r_led *led, u8 mode) { int ret; u8 *buffer; buffer = kzalloc(GT683R_BUFFER_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; buffer[0] = 0x01; buffer[1] = 0x02; buffer[2] = 0x20; buffer[3] = mode; buffer[4] = 0x01; ret = gt683r_led_snd_msg(led, buffer); kfree(buffer); return ret; } static void gt683r_led_work(struct work_struct *work) { int i; u8 leds = 0; u8 mode; struct gt683r_led *led = container_of(work, struct gt683r_led, work); mutex_lock(&led->lock); for (i = 0; i < GT683R_LED_COUNT; i++) { if (led->brightnesses[i]) leds |= BIT(i); } if (gt683r_leds_set(led, leds)) goto fail; if (leds) mode = led->mode; else mode = GT683R_LED_OFF; gt683r_mode_set(led, mode); fail: mutex_unlock(&led->lock); } static DEVICE_ATTR_RW(mode); static struct attribute *gt683r_led_attrs[] = { &dev_attr_mode.attr, NULL }; static const struct attribute_group gt683r_led_group = { .name = "gt683r", .attrs = gt683r_led_attrs, }; static const struct attribute_group *gt683r_led_groups[] = { &gt683r_led_group, NULL }; static int gt683r_led_probe(struct hid_device *hdev, const struct hid_device_id *id) { int i; int ret; int name_sz; char *name; struct gt683r_led *led; led = devm_kzalloc(&hdev->dev, sizeof(*led), GFP_KERNEL); if (!led) return -ENOMEM; mutex_init(&led->lock); INIT_WORK(&led->work, gt683r_led_work); led->mode = GT683R_LED_NORMAL; led->hdev = hdev; hid_set_drvdata(hdev, led); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "hid parsing failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } for (i = 0; i < GT683R_LED_COUNT; i++) { name_sz = strlen(dev_name(&hdev->dev)) + strlen(gt683r_panel_names[i]) + 3; name = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); if (!name) { ret = -ENOMEM; goto fail; } snprintf(name, name_sz, "%s::%s", dev_name(&hdev->dev), gt683r_panel_names[i]); led->led_devs[i].name = name; led->led_devs[i].max_brightness = 1; led->led_devs[i].brightness_set = gt683r_brightness_set; led->led_devs[i].groups = gt683r_led_groups; ret = led_classdev_register(&hdev->dev, &led->led_devs[i]); if (ret) { hid_err(hdev, "could not register led device\n"); goto fail; } } return 0; fail: for (i = i - 1; i >= 0; i--) led_classdev_unregister(&led->led_devs[i]); hid_hw_stop(hdev); return ret; } static void gt683r_led_remove(struct hid_device *hdev) { int i; struct gt683r_led *led = hid_get_drvdata(hdev); for (i = 0; i < GT683R_LED_COUNT; i++) led_classdev_unregister(&led->led_devs[i]); flush_work(&led->work); hid_hw_stop(hdev); } static struct hid_driver gt683r_led_driver = { .probe = gt683r_led_probe, .remove = gt683r_led_remove, .name = "gt683r_led", .id_table = 
gt683r_led_id, }; module_hid_driver(gt683r_led_driver); MODULE_AUTHOR("Janne Kanniainen"); MODULE_DESCRIPTION("MSI GT683R led driver"); MODULE_LICENSE("GPL");
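The driver above programs the keyboard through 8-byte HID feature reports whose layout can be read off gt683r_leds_set() and gt683r_mode_set(): report id 0x01, command byte 0x02, then a selector (0x30 for the LED mask, 0x20 for the mode) and its argument. A small sketch of how such payloads are filled follows; build_leds_report()/build_mode_report() are illustrative helpers, not part of the driver.

/* Sketch of the GT683R feature-report payloads, built in plain userspace C. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define GT683R_BUFFER_SIZE 8

static void build_leds_report(uint8_t *buf, uint8_t led_mask)
{
	memset(buf, 0, GT683R_BUFFER_SIZE);
	buf[0] = 0x01;		/* report id */
	buf[1] = 0x02;
	buf[2] = 0x30;		/* "set enabled LEDs" selector */
	buf[3] = led_mask;	/* bit 0 back, bit 1 side, bit 2 front */
}

static void build_mode_report(uint8_t *buf, uint8_t mode)
{
	memset(buf, 0, GT683R_BUFFER_SIZE);
	buf[0] = 0x01;
	buf[1] = 0x02;
	buf[2] = 0x20;		/* "set mode" selector */
	buf[3] = mode;		/* 0 off, 2 audio, 3 breathing, 5 normal */
	buf[4] = 0x01;
}

int main(void)
{
	uint8_t buf[GT683R_BUFFER_SIZE];
	int i;

	build_leds_report(buf, 0x07);	/* all three panels on */
	for (i = 0; i < GT683R_BUFFER_SIZE; i++)
		printf("%02x ", buf[i]);
	printf("\n");

	build_mode_report(buf, 5);	/* GT683R_LED_NORMAL */
	for (i = 0; i < GT683R_BUFFER_SIZE; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}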
2 2 1 1 1 1 2 2 2 1 2 2 2 2 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 // SPDX-License-Identifier: GPL-2.0-only #include "netlink.h" #include "common.h" struct rss_req_info { struct ethnl_req_info base; u32 rss_context; }; struct rss_reply_data { struct ethnl_reply_data base; bool no_key_fields; u32 indir_size; u32 hkey_size; u32 hfunc; u32 input_xfrm; u32 *indir_table; u8 *hkey; }; #define RSS_REQINFO(__req_base) \ container_of(__req_base, struct rss_req_info, base) #define RSS_REPDATA(__reply_base) \ container_of(__reply_base, struct rss_reply_data, base) const struct nla_policy ethnl_rss_get_policy[] = { [ETHTOOL_A_RSS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), [ETHTOOL_A_RSS_CONTEXT] = { .type = NLA_U32 }, [ETHTOOL_A_RSS_START_CONTEXT] = { .type = NLA_U32 }, }; static int rss_parse_request(struct ethnl_req_info *req_info, struct nlattr **tb, struct netlink_ext_ack *extack) { struct rss_req_info *request = RSS_REQINFO(req_info); if (tb[ETHTOOL_A_RSS_CONTEXT]) request->rss_context = nla_get_u32(tb[ETHTOOL_A_RSS_CONTEXT]); if (tb[ETHTOOL_A_RSS_START_CONTEXT]) { NL_SET_BAD_ATTR(extack, tb[ETHTOOL_A_RSS_START_CONTEXT]); return -EINVAL; } return 0; } static int rss_prepare_get(const struct rss_req_info *request, struct net_device *dev, struct rss_reply_data *data, const struct genl_info *info) { struct ethtool_rxfh_param rxfh = {}; const struct ethtool_ops *ops; u32 total_size, indir_bytes; u8 *rss_config; int ret; ops = dev->ethtool_ops; ret = ethnl_ops_begin(dev); if (ret < 0) return ret; data->indir_size = 0; data->hkey_size = 0; if (ops->get_rxfh_indir_size) data->indir_size = ops->get_rxfh_indir_size(dev); if (ops->get_rxfh_key_size) data->hkey_size = ops->get_rxfh_key_size(dev); indir_bytes = data->indir_size * sizeof(u32); total_size = indir_bytes + data->hkey_size; rss_config = kzalloc(total_size, GFP_KERNEL); if (!rss_config) { ret = -ENOMEM; goto out_ops; } if (data->indir_size) data->indir_table = (u32 *)rss_config; if (data->hkey_size) data->hkey = rss_config + indir_bytes; rxfh.indir_size = data->indir_size; rxfh.indir = data->indir_table; rxfh.key_size = data->hkey_size; rxfh.key = data->hkey; ret = ops->get_rxfh(dev, &rxfh); if (ret) goto out_ops; data->hfunc = 
rxfh.hfunc; data->input_xfrm = rxfh.input_xfrm; out_ops: ethnl_ops_complete(dev); return ret; } static int rss_prepare_ctx(const struct rss_req_info *request, struct net_device *dev, struct rss_reply_data *data, const struct genl_info *info) { struct ethtool_rxfh_context *ctx; u32 total_size, indir_bytes; u8 *rss_config; data->no_key_fields = !dev->ethtool_ops->rxfh_per_ctx_key; ctx = xa_load(&dev->ethtool->rss_ctx, request->rss_context); if (!ctx) return -ENOENT; data->indir_size = ctx->indir_size; data->hkey_size = ctx->key_size; data->hfunc = ctx->hfunc; data->input_xfrm = ctx->input_xfrm; indir_bytes = data->indir_size * sizeof(u32); total_size = indir_bytes + data->hkey_size; rss_config = kzalloc(total_size, GFP_KERNEL); if (!rss_config) return -ENOMEM; data->indir_table = (u32 *)rss_config; memcpy(data->indir_table, ethtool_rxfh_context_indir(ctx), indir_bytes); if (data->hkey_size) { data->hkey = rss_config + indir_bytes; memcpy(data->hkey, ethtool_rxfh_context_key(ctx), data->hkey_size); } return 0; } static int rss_prepare_data(const struct ethnl_req_info *req_base, struct ethnl_reply_data *reply_base, const struct genl_info *info) { struct rss_reply_data *data = RSS_REPDATA(reply_base); struct rss_req_info *request = RSS_REQINFO(req_base); struct net_device *dev = reply_base->dev; const struct ethtool_ops *ops; ops = dev->ethtool_ops; if (!ops->get_rxfh) return -EOPNOTSUPP; /* Some drivers don't handle rss_context */ if (request->rss_context) { if (!ops->cap_rss_ctx_supported && !ops->create_rxfh_context) return -EOPNOTSUPP; return rss_prepare_ctx(request, dev, data, info); } return rss_prepare_get(request, dev, data, info); } static int rss_reply_size(const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { const struct rss_reply_data *data = RSS_REPDATA(reply_base); int len; len = nla_total_size(sizeof(u32)) + /* _RSS_CONTEXT */ nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */ nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */ nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */ nla_total_size(data->hkey_size); /* _RSS_HKEY */ return len; } static int rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { const struct rss_reply_data *data = RSS_REPDATA(reply_base); struct rss_req_info *request = RSS_REQINFO(req_base); if (request->rss_context && nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context)) return -EMSGSIZE; if ((data->indir_size && nla_put(skb, ETHTOOL_A_RSS_INDIR, sizeof(u32) * data->indir_size, data->indir_table))) return -EMSGSIZE; if (data->no_key_fields) return 0; if ((data->hfunc && nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) || (data->input_xfrm && nla_put_u32(skb, ETHTOOL_A_RSS_INPUT_XFRM, data->input_xfrm)) || (data->hkey_size && nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey))) return -EMSGSIZE; return 0; } static void rss_cleanup_data(struct ethnl_reply_data *reply_base) { const struct rss_reply_data *data = RSS_REPDATA(reply_base); kfree(data->indir_table); } struct rss_nl_dump_ctx { unsigned long ifindex; unsigned long ctx_idx; /* User wants to only dump contexts from given ifindex */ unsigned int match_ifindex; unsigned int start_ctx; }; static struct rss_nl_dump_ctx *rss_dump_ctx(struct netlink_callback *cb) { NL_ASSERT_CTX_FITS(struct rss_nl_dump_ctx); return (struct rss_nl_dump_ctx *)cb->ctx; } int ethnl_rss_dump_start(struct netlink_callback *cb) { const struct genl_info *info = genl_info_dump(cb); struct 
rss_nl_dump_ctx *ctx = rss_dump_ctx(cb); struct ethnl_req_info req_info = {}; struct nlattr **tb = info->attrs; int ret; /* Filtering by context not supported */ if (tb[ETHTOOL_A_RSS_CONTEXT]) { NL_SET_BAD_ATTR(info->extack, tb[ETHTOOL_A_RSS_CONTEXT]); return -EINVAL; } if (tb[ETHTOOL_A_RSS_START_CONTEXT]) { ctx->start_ctx = nla_get_u32(tb[ETHTOOL_A_RSS_START_CONTEXT]); ctx->ctx_idx = ctx->start_ctx; } ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_RSS_HEADER], sock_net(cb->skb->sk), cb->extack, false); if (req_info.dev) { ctx->match_ifindex = req_info.dev->ifindex; ctx->ifindex = ctx->match_ifindex; ethnl_parse_header_dev_put(&req_info); req_info.dev = NULL; } return ret; } static int rss_dump_one_ctx(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, u32 rss_context) { const struct genl_info *info = genl_info_dump(cb); struct rss_reply_data data = {}; struct rss_req_info req = {}; void *ehdr; int ret; req.rss_context = rss_context; ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_RSS_GET_REPLY); if (!ehdr) return -EMSGSIZE; ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_RSS_HEADER); if (ret < 0) goto err_cancel; /* Context 0 is not currently storred or cached in the XArray */ if (!rss_context) ret = rss_prepare_get(&req, dev, &data, info); else ret = rss_prepare_ctx(&req, dev, &data, info); if (ret) goto err_cancel; ret = rss_fill_reply(skb, &req.base, &data.base); if (ret) goto err_cleanup; genlmsg_end(skb, ehdr); rss_cleanup_data(&data.base); return 0; err_cleanup: rss_cleanup_data(&data.base); err_cancel: genlmsg_cancel(skb, ehdr); return ret; } static int rss_dump_one_dev(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev) { struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb); int ret; if (!dev->ethtool_ops->get_rxfh) return 0; if (!ctx->ctx_idx) { ret = rss_dump_one_ctx(skb, cb, dev, 0); if (ret) return ret; ctx->ctx_idx++; } for (; xa_find(&dev->ethtool->rss_ctx, &ctx->ctx_idx, ULONG_MAX, XA_PRESENT); ctx->ctx_idx++) { ret = rss_dump_one_ctx(skb, cb, dev, ctx->ctx_idx); if (ret) return ret; } ctx->ctx_idx = ctx->start_ctx; return 0; } int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct rss_nl_dump_ctx *ctx = rss_dump_ctx(cb); struct net *net = sock_net(skb->sk); struct net_device *dev; int ret = 0; rtnl_lock(); for_each_netdev_dump(net, dev, ctx->ifindex) { if (ctx->match_ifindex && ctx->match_ifindex != ctx->ifindex) break; ret = rss_dump_one_dev(skb, cb, dev); if (ret) break; } rtnl_unlock(); return ret; } const struct ethnl_request_ops ethnl_rss_request_ops = { .request_cmd = ETHTOOL_MSG_RSS_GET, .reply_cmd = ETHTOOL_MSG_RSS_GET_REPLY, .hdr_attr = ETHTOOL_A_RSS_HEADER, .req_info_size = sizeof(struct rss_req_info), .reply_data_size = sizeof(struct rss_reply_data), .parse_request = rss_parse_request, .prepare_data = rss_prepare_data, .reply_size = rss_reply_size, .fill_reply = rss_fill_reply, .cleanup_data = rss_cleanup_data, };
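rss_prepare_get() and rss_prepare_ctx() above pack the RSS state into a single allocation: the u32 indirection table sits at the start of the buffer and the hash key is placed immediately after it. The sketch below reproduces that layout in userspace; rss_layout_demo() is an illustrative name, and the 128-entry / 40-byte sizes stand in for what a driver would report via get_rxfh_indir_size()/get_rxfh_key_size().

/* Sketch of the single-buffer indirection-table + key layout. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static void rss_layout_demo(unsigned int indir_size, unsigned int hkey_size)
{
	size_t indir_bytes = indir_size * sizeof(uint32_t);
	uint8_t *rss_config = calloc(1, indir_bytes + hkey_size);
	uint32_t *indir_table;
	uint8_t *hkey;
	unsigned int i;

	if (!rss_config)
		return;

	indir_table = (uint32_t *)rss_config;	/* table at the start of the buffer */
	hkey = rss_config + indir_bytes;	/* key packed right after it */

	for (i = 0; i < indir_size; i++)
		indir_table[i] = i % 4;		/* e.g. spread over 4 rx queues */
	memset(hkey, 0xaa, hkey_size);		/* placeholder key bytes */

	printf("total %zu bytes: %u-entry table + %u-byte key\n",
	       indir_bytes + hkey_size, indir_size, hkey_size);
	free(rss_config);
}

int main(void)
{
	rss_layout_demo(128, 40);
	return 0;
}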
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _INPUT_COMPAT_H
#define _INPUT_COMPAT_H

/*
 * 32bit compatibility wrappers for the input subsystem.
 *
 * Very heavily based on evdev.c - Copyright (c) 1999-2002 Vojtech Pavlik
 */

#include <linux/compiler.h>
#include <linux/compat.h>
#include <linux/input.h>

#ifdef CONFIG_COMPAT

struct input_event_compat {
	compat_ulong_t sec;
	compat_ulong_t usec;
	__u16 type;
	__u16 code;
	__s32 value;
};

struct ff_periodic_effect_compat {
	__u16 waveform;
	__u16 period;
	__s16 magnitude;
	__s16 offset;
	__u16 phase;

	struct ff_envelope envelope;

	__u32 custom_len;
	compat_uptr_t custom_data;
};

struct ff_effect_compat {
	__u16 type;
	__s16 id;
	__u16 direction;
	struct ff_trigger trigger;
	struct ff_replay replay;

	union {
		struct ff_constant_effect constant;
		struct ff_ramp_effect ramp;
		struct ff_periodic_effect_compat periodic;
		struct ff_condition_effect condition[2]; /* One for each axis */
		struct ff_rumble_effect rumble;
	} u;
};

static inline size_t input_event_size(void)
{
	return (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) ?
		sizeof(struct input_event_compat) : sizeof(struct input_event);
}

#else

static inline size_t input_event_size(void)
{
	return sizeof(struct input_event);
}

#endif /* CONFIG_COMPAT */

int input_event_from_user(const char __user *buffer,
			  struct input_event *event);

int input_event_to_user(char __user *buffer,
			const struct input_event *event);

int input_ff_effect_from_user(const char __user *buffer, size_t size,
			      struct ff_effect *effect);

#endif /* _INPUT_COMPAT_H */
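input_event_size() above exists because the event timestamp is a pair of unsigned longs: a 32-bit compat caller uses 4-byte fields where the native 64-bit layout uses 8-byte ones. The userspace sketch below shows the resulting size difference; the two structs are local stand-ins for the kernel definitions, not the real ones.

/* Userspace size comparison of native vs. 32-bit-compat input events. */
#include <stdio.h>
#include <stdint.h>

struct input_event_native {	/* stand-in for struct input_event on a 64-bit build */
	unsigned long sec;
	unsigned long usec;
	uint16_t type;
	uint16_t code;
	int32_t value;
};

struct input_event_compat32 {	/* stand-in for struct input_event_compat */
	uint32_t sec;
	uint32_t usec;
	uint16_t type;
	uint16_t code;
	int32_t value;
};

int main(void)
{
	/* On a 64-bit build this prints "native: 24 bytes, compat32: 16 bytes". */
	printf("native: %zu bytes, compat32: %zu bytes\n",
	       sizeof(struct input_event_native),
	       sizeof(struct input_event_compat32));
	return 0;
}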
16 16 10 6 16 8 23 23 23 23 23 16 17 2 16 16 11 30 27 28 28 27 28 23 26 24 1 23 23 23 12 11 12 13 12 19 40 41 40 31 31 1 30 8 30 31 20 22 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 // SPDX-License-Identifier: GPL-2.0-or-later /* * linux/mm/process_vm_access.c * * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp. */ #include <linux/compat.h> #include <linux/mm.h> #include <linux/uio.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/syscalls.h> /** * process_vm_rw_pages - read/write pages from task specified * @pages: array of pointers to pages we want to copy * @offset: offset in page to start copying from/to * @len: number of bytes to copy * @iter: where to copy to/from locally * @vm_write: 0 means copy from, 1 means copy to * Returns 0 on success, error code otherwise */ static int process_vm_rw_pages(struct page **pages, unsigned offset, size_t len, struct iov_iter *iter, int vm_write) { /* Do the copy for each page */ while (len && iov_iter_count(iter)) { struct page *page = *pages++; size_t copy = PAGE_SIZE - offset; size_t copied; if (copy > len) copy = len; if (vm_write) copied = copy_page_from_iter(page, offset, copy, iter); else copied = copy_page_to_iter(page, offset, copy, iter); len -= copied; if (copied < copy && iov_iter_count(iter)) return -EFAULT; offset = 0; } return 0; } /* Maximum number of pages kmalloc'd to hold struct page's during copy */ #define PVM_MAX_KMALLOC_PAGES 2 /* Maximum number of pages that can be stored at a time */ #define PVM_MAX_USER_PAGES (PVM_MAX_KMALLOC_PAGES * PAGE_SIZE / sizeof(struct page *)) /** * process_vm_rw_single_vec - read/write pages from task specified * @addr: start memory address of target process * @len: size of area to copy to/from * @iter: where to copy to/from locally * @process_pages: struct pages area that can store at least * nr_pages_to_copy struct page pointers * @mm: mm for task * @task: task to read/write from * @vm_write: 0 means copy from, 1 means copy to * Returns 0 on success or on failure error code */ static int process_vm_rw_single_vec(unsigned long addr, unsigned long len, struct iov_iter *iter, struct page **process_pages, struct mm_struct *mm, struct task_struct *task, int vm_write) { unsigned long pa = addr & PAGE_MASK; unsigned long start_offset = addr - pa; unsigned long nr_pages; ssize_t rc = 0; unsigned int flags = 0; /* Work out address and 
page range required */ if (len == 0) return 0; nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; if (vm_write) flags |= FOLL_WRITE; while (!rc && nr_pages && iov_iter_count(iter)) { int pinned_pages = min_t(unsigned long, nr_pages, PVM_MAX_USER_PAGES); int locked = 1; size_t bytes; /* * Get the pages we're interested in. We must * access remotely because task/mm might not * current/current->mm */ mmap_read_lock(mm); pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages, flags, process_pages, &locked); if (locked) mmap_read_unlock(mm); if (pinned_pages <= 0) return -EFAULT; bytes = pinned_pages * PAGE_SIZE - start_offset; if (bytes > len) bytes = len; rc = process_vm_rw_pages(process_pages, start_offset, bytes, iter, vm_write); len -= bytes; start_offset = 0; nr_pages -= pinned_pages; pa += pinned_pages * PAGE_SIZE; /* If vm_write is set, the pages need to be made dirty: */ unpin_user_pages_dirty_lock(process_pages, pinned_pages, vm_write); } return rc; } /* Maximum number of entries for process pages array which lives on stack */ #define PVM_MAX_PP_ARRAY_COUNT 16 /** * process_vm_rw_core - core of reading/writing pages from task specified * @pid: PID of process to read/write from/to * @iter: where to copy to/from locally * @rvec: iovec array specifying where to copy to/from in the other process * @riovcnt: size of rvec array * @flags: currently unused * @vm_write: 0 if reading from other process, 1 if writing to other process * * Returns the number of bytes read/written or error code. May * return less bytes than expected if an error occurs during the copying * process. */ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter, const struct iovec *rvec, unsigned long riovcnt, unsigned long flags, int vm_write) { struct task_struct *task; struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT]; struct page **process_pages = pp_stack; struct mm_struct *mm; unsigned long i; ssize_t rc = 0; unsigned long nr_pages = 0; unsigned long nr_pages_iov; ssize_t iov_len; size_t total_len = iov_iter_count(iter); /* * Work out how many pages of struct pages we're going to need * when eventually calling get_user_pages */ for (i = 0; i < riovcnt; i++) { iov_len = rvec[i].iov_len; if (iov_len > 0) { nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len - 1) / PAGE_SIZE - (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1; nr_pages = max(nr_pages, nr_pages_iov); } } if (nr_pages == 0) return 0; if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { /* For reliability don't try to kmalloc more than 2 pages worth */ process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES * PAGE_SIZE, sizeof(struct page *)*nr_pages), GFP_KERNEL); if (!process_pages) return -ENOMEM; } /* Get process information */ task = find_get_task_by_vpid(pid); if (!task) { rc = -ESRCH; goto free_proc_pages; } mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); if (IS_ERR(mm)) { rc = PTR_ERR(mm); /* * Explicitly map EACCES to EPERM as EPERM is a more * appropriate error code for process_vw_readv/writev */ if (rc == -EACCES) rc = -EPERM; goto put_task_struct; } for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++) rc = process_vm_rw_single_vec( (unsigned long)rvec[i].iov_base, rvec[i].iov_len, iter, process_pages, mm, task, vm_write); /* copied = space before - space after */ total_len -= iov_iter_count(iter); /* If we have managed to copy any data at all then we return the number of bytes copied. 
Otherwise we return the error code */ if (total_len) rc = total_len; mmput(mm); put_task_struct: put_task_struct(task); free_proc_pages: if (process_pages != pp_stack) kfree(process_pages); return rc; } /** * process_vm_rw - check iovecs before calling core routine * @pid: PID of process to read/write from/to * @lvec: iovec array specifying where to copy to/from locally * @liovcnt: size of lvec array * @rvec: iovec array specifying where to copy to/from in the other process * @riovcnt: size of rvec array * @flags: currently unused * @vm_write: 0 if reading from other process, 1 if writing to other process * * Returns the number of bytes read/written or error code. May * return less bytes than expected if an error occurs during the copying * process. */ static ssize_t process_vm_rw(pid_t pid, const struct iovec __user *lvec, unsigned long liovcnt, const struct iovec __user *rvec, unsigned long riovcnt, unsigned long flags, int vm_write) { struct iovec iovstack_l[UIO_FASTIOV]; struct iovec iovstack_r[UIO_FASTIOV]; struct iovec *iov_l = iovstack_l; struct iovec *iov_r; struct iov_iter iter; ssize_t rc; int dir = vm_write ? ITER_SOURCE : ITER_DEST; if (flags != 0) return -EINVAL; /* Check iovecs */ rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter); if (rc < 0) return rc; if (!iov_iter_count(&iter)) goto free_iov_l; iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r, in_compat_syscall()); if (IS_ERR(iov_r)) { rc = PTR_ERR(iov_r); goto free_iov_l; } rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write); if (iov_r != iovstack_r) kfree(iov_r); free_iov_l: kfree(iov_l); return rc; } SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec, unsigned long, liovcnt, const struct iovec __user *, rvec, unsigned long, riovcnt, unsigned long, flags) { return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0); } SYSCALL_DEFINE6(process_vm_writev, pid_t, pid, const struct iovec __user *, lvec, unsigned long, liovcnt, const struct iovec __user *, rvec, unsigned long, riovcnt, unsigned long, flags) { return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1); }
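The two syscalls defined above are exposed by glibc as process_vm_readv() and process_vm_writev() in <sys/uio.h>. A minimal usage example follows; for simplicity it reads memory from the calling process itself, since accessing another pid requires the same permissions as PTRACE_MODE_ATTACH.

/* Minimal userspace use of process_vm_readv(), reading from our own pid. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char remote_buf[] = "hello from the 'remote' process";
	char local_buf[64] = { 0 };
	struct iovec local = { .iov_base = local_buf, .iov_len = sizeof(local_buf) };
	struct iovec remote = { .iov_base = remote_buf, .iov_len = strlen(remote_buf) + 1 };
	ssize_t n;

	n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: \"%s\"\n", n, local_buf);
	return 0;
}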
8 275 339 339 290 93 84 47 47 93 347 348 348 348 347 347 339 338 75 75 332 333 348 333 333 348 222 222 222 158 88 70 222 222 83 83 222 213 33 107 107 61 107 107 6 107 107 60 11 260 47 212 53 160 147 107 147 13 95 201 113 95 18 242 242 242 241 4 242 242 241 241 242 242 25 19 18 241 240 241 241 169 259 13 47 259 17 259 260 260 258 259 13 260 260 9 16 259 259 13 260 259 259 260 260 259 259 13 258 3 10 1 1 258 13 260 247 247 256 208 208 202 202 202 174 197 197 197 196 201 201 193 171 171 171 73 73 208 197 208 66 64 1 61 3 63 63 61 48 61 46 44 15 15 46 46 46 46 46 46 46 2 2 59 61 2 2 61 19 61 16 61 3 60 61 60 61 62 1 47 63 63 46 66 62 5 5 5 5 5 22 6 22 5 254 242 253 12 12 12 12 11 11 11 11 11 11 155 157 149 138 157 156 171 171 162 36 36 171 4 4 4 4 4 4 3 1 4 1 4 4 32 32 32 32 16 16 32 32 32 4 32 16 16 16 42 33 33 33 33 32 33 33 14 33 33 42 17 42 9 42 1 1 38 38 37 36 36 30 36 6 35 36 36 2 11 11 11 9 9 8 8 7 8 5 8 8 8 8 1 11 146 146 139 139 138 138 138 137 25 13 10 3 145 138 11 138 4 138 135 89 46 138 138 138 145 11 4 8 8 8 8 142 142 2 2 1 4 146 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 
// SPDX-License-Identifier:
GPL-2.0-only /* * linux/fs/fat/dir.c * * directory handling functions for fat-based filesystems * * Written 1992,1993 by Werner Almesberger * * Hidden files 1995 by Albert Cahalan <albert@ccs.neu.edu> <adc@coe.neu.edu> * * VFAT extensions by Gordon Chaffee <chaffee@plateau.cs.berkeley.edu> * Merged with msdos fs by Henrik Storner <storner@osiris.ping.dk> * Rewritten for constant inumbers. Plugged buffer overrun in readdir(). AV * Short name translation 1999, 2001 by Wolfram Pienkoss <wp@bszh.de> */ #include <linux/slab.h> #include <linux/compat.h> #include <linux/uaccess.h> #include <linux/iversion.h> #include "fat.h" /* * Maximum buffer size of short name. * [(MSDOS_NAME + '.') * max one char + nul] * For msdos style, ['.' (hidden) + MSDOS_NAME + '.' + nul] */ #define FAT_MAX_SHORT_SIZE ((MSDOS_NAME + 1) * NLS_MAX_CHARSET_SIZE + 1) /* * Maximum buffer size of unicode chars from slots. * [(max longname slots * 13 (size in a slot) + nul) * sizeof(wchar_t)] */ #define FAT_MAX_UNI_CHARS ((MSDOS_SLOTS - 1) * 13 + 1) #define FAT_MAX_UNI_SIZE (FAT_MAX_UNI_CHARS * sizeof(wchar_t)) static inline unsigned char fat_tolower(unsigned char c) { return ((c >= 'A') && (c <= 'Z')) ? c+32 : c; } static inline loff_t fat_make_i_pos(struct super_block *sb, struct buffer_head *bh, struct msdos_dir_entry *de) { return ((loff_t)bh->b_blocknr << MSDOS_SB(sb)->dir_per_block_bits) | (de - (struct msdos_dir_entry *)bh->b_data); } static inline void fat_dir_readahead(struct inode *dir, sector_t iblock, sector_t phys) { struct super_block *sb = dir->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bh; int sec; /* This is not a first sector of cluster, or sec_per_clus == 1 */ if ((iblock & (sbi->sec_per_clus - 1)) || sbi->sec_per_clus == 1) return; /* root dir of FAT12/FAT16 */ if (!is_fat32(sbi) && (dir->i_ino == MSDOS_ROOT_INO)) return; bh = sb_find_get_block(sb, phys); if (bh == NULL || !buffer_uptodate(bh)) { for (sec = 0; sec < sbi->sec_per_clus; sec++) sb_breadahead(sb, phys + sec); } brelse(bh); } /* Returns the inode number of the directory entry at offset pos. If bh is non-NULL, it is brelse'd before. Pos is incremented. The buffer header is returned in bh. AV. Most often we do it item-by-item. Makes sense to optimize. AV. OK, there we go: if both bh and de are non-NULL we assume that we just AV. want the next entry (took one explicit de=NULL in vfat/namei.c). AV. It's done in fat_get_entry() (inlined), here the slow case lives. AV. Additionally, when we return -1 (i.e. reached the end of directory) AV. we make bh NULL. 
*/ static int fat__get_entry(struct inode *dir, loff_t *pos, struct buffer_head **bh, struct msdos_dir_entry **de) { struct super_block *sb = dir->i_sb; sector_t phys, iblock; unsigned long mapped_blocks; int err, offset; next: brelse(*bh); *bh = NULL; iblock = *pos >> sb->s_blocksize_bits; err = fat_bmap(dir, iblock, &phys, &mapped_blocks, 0, false); if (err || !phys) return -1; /* beyond EOF or error */ fat_dir_readahead(dir, iblock, phys); *bh = sb_bread(sb, phys); if (*bh == NULL) { fat_msg_ratelimit(sb, KERN_ERR, "Directory bread(block %llu) failed", (llu)phys); /* skip this block */ *pos = (iblock + 1) << sb->s_blocksize_bits; goto next; } offset = *pos & (sb->s_blocksize - 1); *pos += sizeof(struct msdos_dir_entry); *de = (struct msdos_dir_entry *)((*bh)->b_data + offset); return 0; } static inline int fat_get_entry(struct inode *dir, loff_t *pos, struct buffer_head **bh, struct msdos_dir_entry **de) { /* Fast stuff first */ if (*bh && *de && (*de - (struct msdos_dir_entry *)(*bh)->b_data) < MSDOS_SB(dir->i_sb)->dir_per_block - 1) { *pos += sizeof(struct msdos_dir_entry); (*de)++; return 0; } return fat__get_entry(dir, pos, bh, de); } /* * Convert Unicode 16 to UTF-8, translated Unicode, or ASCII. * If uni_xlate is enabled and we can't get a 1:1 conversion, use a * colon as an escape character since it is normally invalid on the vfat * filesystem. The following four characters are the hexadecimal digits * of Unicode value. This lets us do a full dump and restore of Unicode * filenames. We could get into some trouble with long Unicode names, * but ignore that right now. * Ahem... Stack smashing in ring 0 isn't fun. Fixed. */ static int uni16_to_x8(struct super_block *sb, unsigned char *ascii, const wchar_t *uni, int len, struct nls_table *nls) { int uni_xlate = MSDOS_SB(sb)->options.unicode_xlate; const wchar_t *ip; wchar_t ec; unsigned char *op; int charlen; ip = uni; op = ascii; while (*ip && ((len - NLS_MAX_CHARSET_SIZE) > 0)) { ec = *ip++; charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE); if (charlen > 0) { op += charlen; len -= charlen; } else { if (uni_xlate == 1) { *op++ = ':'; op = hex_byte_pack(op, ec >> 8); op = hex_byte_pack(op, ec); len -= 5; } else { *op++ = '?'; len--; } } } if (unlikely(*ip)) { fat_msg(sb, KERN_WARNING, "filename was truncated while converting."); } *op = 0; return op - ascii; } static inline int fat_uni_to_x8(struct super_block *sb, const wchar_t *uni, unsigned char *buf, int size) { struct msdos_sb_info *sbi = MSDOS_SB(sb); if (sbi->options.utf8) return utf16s_to_utf8s(uni, FAT_MAX_UNI_CHARS, UTF16_HOST_ENDIAN, buf, size); else return uni16_to_x8(sb, buf, uni, size, sbi->nls_io); } static inline int fat_short2uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni) { int charlen; charlen = t->char2uni(c, clen, uni); if (charlen < 0) { *uni = 0x003f; /* a question mark */ charlen = 1; } return charlen; } static inline int fat_short2lower_uni(struct nls_table *t, unsigned char *c, int clen, wchar_t *uni) { int charlen; wchar_t wc; charlen = t->char2uni(c, clen, &wc); if (charlen < 0) { *uni = 0x003f; /* a question mark */ charlen = 1; } else if (charlen <= 1) { unsigned char nc = t->charset2lower[*c]; if (!nc) nc = *c; charlen = t->char2uni(&nc, 1, uni); if (charlen < 0) { *uni = 0x003f; /* a question mark */ charlen = 1; } } else *uni = wc; return charlen; } static inline int fat_shortname2uni(struct nls_table *nls, unsigned char *buf, int buf_size, wchar_t *uni_buf, unsigned short opt, int lower) { int len = 0; if (opt & 
VFAT_SFN_DISPLAY_LOWER) len = fat_short2lower_uni(nls, buf, buf_size, uni_buf); else if (opt & VFAT_SFN_DISPLAY_WIN95) len = fat_short2uni(nls, buf, buf_size, uni_buf); else if (opt & VFAT_SFN_DISPLAY_WINNT) { if (lower) len = fat_short2lower_uni(nls, buf, buf_size, uni_buf); else len = fat_short2uni(nls, buf, buf_size, uni_buf); } else len = fat_short2uni(nls, buf, buf_size, uni_buf); return len; } static inline int fat_name_match(struct msdos_sb_info *sbi, const unsigned char *a, int a_len, const unsigned char *b, int b_len) { if (a_len != b_len) return 0; if (sbi->options.name_check != 's') return !nls_strnicmp(sbi->nls_io, a, b, a_len); else return !memcmp(a, b, a_len); } enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME, PARSE_EOF, }; /** * fat_parse_long - Parse extended directory entry. * * @dir: Pointer to the inode that represents the directory. * @pos: On input, contains the starting position to read from. * On output, updated with the new position. * @bh: Pointer to the buffer head that may be used for reading directory * entries. May be updated. * @de: On input, points to the current directory entry. * On output, points to the next directory entry. * @unicode: Pointer to a buffer where the parsed Unicode long filename will be * stored. * @nr_slots: Pointer to a variable that will store the number of longname * slots found. * * This function returns zero on success, negative value on error, or one of * the following: * * %PARSE_INVALID - Directory entry is invalid. * %PARSE_NOT_LONGNAME - Directory entry does not contain longname. * %PARSE_EOF - Directory has no more entries. */ static int fat_parse_long(struct inode *dir, loff_t *pos, struct buffer_head **bh, struct msdos_dir_entry **de, wchar_t **unicode, unsigned char *nr_slots) { struct msdos_dir_slot *ds; unsigned char id, slot, slots, alias_checksum; if (!*unicode) { *unicode = __getname(); if (!*unicode) { brelse(*bh); return -ENOMEM; } } parse_long: ds = (struct msdos_dir_slot *)*de; id = ds->id; if (!(id & 0x40)) return PARSE_INVALID; slots = id & ~0x40; if (slots > 20 || !slots) /* ceil(256 * 2 / 26) */ return PARSE_INVALID; *nr_slots = slots; alias_checksum = ds->alias_checksum; slot = slots; while (1) { int offset; slot--; offset = slot * 13; fat16_towchar(*unicode + offset, ds->name0_4, 5); fat16_towchar(*unicode + offset + 5, ds->name5_10, 6); fat16_towchar(*unicode + offset + 11, ds->name11_12, 2); if (ds->id & 0x40) (*unicode)[offset + 13] = 0; if (fat_get_entry(dir, pos, bh, de) < 0) return PARSE_EOF; if (slot == 0) break; ds = (struct msdos_dir_slot *)*de; if (ds->attr != ATTR_EXT) return PARSE_NOT_LONGNAME; if ((ds->id & ~0x40) != slot) goto parse_long; if (ds->alias_checksum != alias_checksum) goto parse_long; } if ((*de)->name[0] == DELETED_FLAG) return PARSE_INVALID; if ((*de)->attr == ATTR_EXT) goto parse_long; if (IS_FREE((*de)->name) || ((*de)->attr & ATTR_VOLUME)) return PARSE_INVALID; if (fat_checksum((*de)->name) != alias_checksum) *nr_slots = 0; return 0; } /** * fat_parse_short - Parse MS-DOS (short) directory entry. * @sb: superblock * @de: directory entry to parse * @name: FAT_MAX_SHORT_SIZE array in which to place extracted name * @dot_hidden: Nonzero == prepend '.' to names with ATTR_HIDDEN * * Returns the number of characters extracted into 'name'. 
*/ static int fat_parse_short(struct super_block *sb, const struct msdos_dir_entry *de, unsigned char *name, int dot_hidden) { const struct msdos_sb_info *sbi = MSDOS_SB(sb); int isvfat = sbi->options.isvfat; int nocase = sbi->options.nocase; unsigned short opt_shortname = sbi->options.shortname; struct nls_table *nls_disk = sbi->nls_disk; wchar_t uni_name[14]; unsigned char c, work[MSDOS_NAME]; unsigned char *ptname = name; int chi, chl, i, j, k; int dotoffset = 0; int name_len = 0, uni_len = 0; if (!isvfat && dot_hidden && (de->attr & ATTR_HIDDEN)) { *ptname++ = '.'; dotoffset = 1; } memcpy(work, de->name, sizeof(work)); /* For an explanation of the special treatment of 0x05 in * filenames, see msdos_format_name in namei_msdos.c */ if (work[0] == 0x05) work[0] = 0xE5; /* Filename */ for (i = 0, j = 0; i < 8;) { c = work[i]; if (!c) break; chl = fat_shortname2uni(nls_disk, &work[i], 8 - i, &uni_name[j++], opt_shortname, de->lcase & CASE_LOWER_BASE); if (chl <= 1) { if (!isvfat) ptname[i] = nocase ? c : fat_tolower(c); i++; if (c != ' ') { name_len = i; uni_len = j; } } else { uni_len = j; if (isvfat) i += min(chl, 8-i); else { for (chi = 0; chi < chl && i < 8; chi++, i++) ptname[i] = work[i]; } if (chl) name_len = i; } } i = name_len; j = uni_len; fat_short2uni(nls_disk, ".", 1, &uni_name[j++]); if (!isvfat) ptname[i] = '.'; i++; /* Extension */ for (k = 8; k < MSDOS_NAME;) { c = work[k]; if (!c) break; chl = fat_shortname2uni(nls_disk, &work[k], MSDOS_NAME - k, &uni_name[j++], opt_shortname, de->lcase & CASE_LOWER_EXT); if (chl <= 1) { k++; if (!isvfat) ptname[i] = nocase ? c : fat_tolower(c); i++; if (c != ' ') { name_len = i; uni_len = j; } } else { uni_len = j; if (isvfat) { int offset = min(chl, MSDOS_NAME-k); k += offset; i += offset; } else { for (chi = 0; chi < chl && k < MSDOS_NAME; chi++, i++, k++) { ptname[i] = work[k]; } } if (chl) name_len = i; } } if (name_len > 0) { name_len += dotoffset; if (sbi->options.isvfat) { uni_name[uni_len] = 0x0000; name_len = fat_uni_to_x8(sb, uni_name, name, FAT_MAX_SHORT_SIZE); } } return name_len; } /* * Return values: negative -> error/not found, 0 -> found. */ int fat_search_long(struct inode *inode, const unsigned char *name, int name_len, struct fat_slot_info *sinfo) { struct super_block *sb = inode->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bh = NULL; struct msdos_dir_entry *de; unsigned char nr_slots; wchar_t *unicode = NULL; unsigned char bufname[FAT_MAX_SHORT_SIZE]; loff_t cpos = 0; int err, len; err = -ENOENT; while (1) { if (fat_get_entry(inode, &cpos, &bh, &de) == -1) goto end_of_dir; parse_record: nr_slots = 0; if (de->name[0] == DELETED_FLAG) continue; if (de->attr != ATTR_EXT && (de->attr & ATTR_VOLUME)) continue; if (de->attr != ATTR_EXT && IS_FREE(de->name)) continue; if (de->attr == ATTR_EXT) { int status = fat_parse_long(inode, &cpos, &bh, &de, &unicode, &nr_slots); if (status < 0) { err = status; goto end_of_dir; } else if (status == PARSE_INVALID) continue; else if (status == PARSE_NOT_LONGNAME) goto parse_record; else if (status == PARSE_EOF) goto end_of_dir; } /* Never prepend '.' to hidden files here. * That is done only for msdos mounts (and only when * 'dotsOK=yes'); if we are executing here, it is in the * context of a vfat mount. 
*/ len = fat_parse_short(sb, de, bufname, 0); if (len == 0) continue; /* Compare shortname */ if (fat_name_match(sbi, name, name_len, bufname, len)) goto found; if (nr_slots) { void *longname = unicode + FAT_MAX_UNI_CHARS; int size = PATH_MAX - FAT_MAX_UNI_SIZE; /* Compare longname */ len = fat_uni_to_x8(sb, unicode, longname, size); if (fat_name_match(sbi, name, name_len, longname, len)) goto found; } } found: nr_slots++; /* include the de */ sinfo->slot_off = cpos - nr_slots * sizeof(*de); sinfo->nr_slots = nr_slots; sinfo->de = de; sinfo->bh = bh; sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de); err = 0; end_of_dir: if (unicode) __putname(unicode); return err; } EXPORT_SYMBOL_GPL(fat_search_long); struct fat_ioctl_filldir_callback { struct dir_context ctx; void __user *dirent; int result; /* for dir ioctl */ const char *longname; int long_len; const char *shortname; int short_len; }; static int __fat_readdir(struct inode *inode, struct file *file, struct dir_context *ctx, int short_only, struct fat_ioctl_filldir_callback *both) { struct super_block *sb = inode->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bh; struct msdos_dir_entry *de; unsigned char nr_slots; wchar_t *unicode = NULL; unsigned char bufname[FAT_MAX_SHORT_SIZE]; int isvfat = sbi->options.isvfat; const char *fill_name = NULL; int fake_offset = 0; loff_t cpos; int short_len = 0, fill_len = 0; int ret = 0; mutex_lock(&sbi->s_lock); cpos = ctx->pos; /* Fake . and .. for the root directory. */ if (inode->i_ino == MSDOS_ROOT_INO) { if (!dir_emit_dots(file, ctx)) goto out; if (ctx->pos == 2) { fake_offset = 1; cpos = 0; } } if (cpos & (sizeof(struct msdos_dir_entry) - 1)) { ret = -ENOENT; goto out; } bh = NULL; get_new: if (fat_get_entry(inode, &cpos, &bh, &de) == -1) goto end_of_dir; parse_record: nr_slots = 0; /* * Check for long filename entry, but if short_only, we don't * need to parse long filename. */ if (isvfat && !short_only) { if (de->name[0] == DELETED_FLAG) goto record_end; if (de->attr != ATTR_EXT && (de->attr & ATTR_VOLUME)) goto record_end; if (de->attr != ATTR_EXT && IS_FREE(de->name)) goto record_end; } else { if ((de->attr & ATTR_VOLUME) || IS_FREE(de->name)) goto record_end; } if (isvfat && de->attr == ATTR_EXT) { int status = fat_parse_long(inode, &cpos, &bh, &de, &unicode, &nr_slots); if (status < 0) { bh = NULL; ret = status; goto end_of_dir; } else if (status == PARSE_INVALID) goto record_end; else if (status == PARSE_NOT_LONGNAME) goto parse_record; else if (status == PARSE_EOF) goto end_of_dir; if (nr_slots) { void *longname = unicode + FAT_MAX_UNI_CHARS; int size = PATH_MAX - FAT_MAX_UNI_SIZE; int len = fat_uni_to_x8(sb, unicode, longname, size); fill_name = longname; fill_len = len; /* !both && !short_only, so we don't need shortname. 
*/ if (!both) goto start_filldir; short_len = fat_parse_short(sb, de, bufname, sbi->options.dotsOK); if (short_len == 0) goto record_end; /* hack for fat_ioctl_filldir() */ both->longname = fill_name; both->long_len = fill_len; both->shortname = bufname; both->short_len = short_len; fill_name = NULL; fill_len = 0; goto start_filldir; } } short_len = fat_parse_short(sb, de, bufname, sbi->options.dotsOK); if (short_len == 0) goto record_end; fill_name = bufname; fill_len = short_len; start_filldir: ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); if (fake_offset && ctx->pos < 2) ctx->pos = 2; if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { if (!dir_emit_dot(file, ctx)) goto fill_failed; } else if (!memcmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) { if (!dir_emit_dotdot(file, ctx)) goto fill_failed; } else { unsigned long inum; loff_t i_pos = fat_make_i_pos(sb, bh, de); struct inode *tmp = fat_iget(sb, i_pos); if (tmp) { inum = tmp->i_ino; iput(tmp); } else inum = iunique(sb, MSDOS_ROOT_INO); if (!dir_emit(ctx, fill_name, fill_len, inum, (de->attr & ATTR_DIR) ? DT_DIR : DT_REG)) goto fill_failed; } record_end: fake_offset = 0; ctx->pos = cpos; goto get_new; end_of_dir: if (fake_offset && cpos < 2) ctx->pos = 2; else ctx->pos = cpos; fill_failed: brelse(bh); if (unicode) __putname(unicode); out: mutex_unlock(&sbi->s_lock); return ret; } static int fat_readdir(struct file *file, struct dir_context *ctx) { return __fat_readdir(file_inode(file), file, ctx, 0, NULL); } #define FAT_IOCTL_FILLDIR_FUNC(func, dirent_type) \ static bool func(struct dir_context *ctx, const char *name, int name_len, \ loff_t offset, u64 ino, unsigned int d_type) \ { \ struct fat_ioctl_filldir_callback *buf = \ container_of(ctx, struct fat_ioctl_filldir_callback, ctx); \ struct dirent_type __user *d1 = buf->dirent; \ struct dirent_type __user *d2 = d1 + 1; \ \ if (buf->result) \ return false; \ buf->result++; \ \ if (name != NULL) { \ /* dirent has only short name */ \ if (name_len >= sizeof(d1->d_name)) \ name_len = sizeof(d1->d_name) - 1; \ \ if (put_user(0, &d2->d_name[0]) || \ put_user(0, &d2->d_reclen) || \ copy_to_user(d1->d_name, name, name_len) || \ put_user(0, d1->d_name + name_len) || \ put_user(name_len, &d1->d_reclen)) \ goto efault; \ } else { \ /* dirent has short and long name */ \ const char *longname = buf->longname; \ int long_len = buf->long_len; \ const char *shortname = buf->shortname; \ int short_len = buf->short_len; \ \ if (long_len >= sizeof(d1->d_name)) \ long_len = sizeof(d1->d_name) - 1; \ if (short_len >= sizeof(d1->d_name)) \ short_len = sizeof(d1->d_name) - 1; \ \ if (copy_to_user(d2->d_name, longname, long_len) || \ put_user(0, d2->d_name + long_len) || \ put_user(long_len, &d2->d_reclen) || \ put_user(ino, &d2->d_ino) || \ put_user(offset, &d2->d_off) || \ copy_to_user(d1->d_name, shortname, short_len) || \ put_user(0, d1->d_name + short_len) || \ put_user(short_len, &d1->d_reclen)) \ goto efault; \ } \ return true; \ efault: \ buf->result = -EFAULT; \ return false; \ } FAT_IOCTL_FILLDIR_FUNC(fat_ioctl_filldir, __fat_dirent) static int fat_ioctl_readdir(struct inode *inode, struct file *file, void __user *dirent, filldir_t filldir, int short_only, int both) { struct fat_ioctl_filldir_callback buf = { .ctx.actor = filldir, .dirent = dirent }; int ret; buf.dirent = dirent; buf.result = 0; inode_lock_shared(inode); buf.ctx.pos = file->f_pos; ret = -ENOENT; if (!IS_DEADDIR(inode)) { ret = __fat_readdir(inode, file, &buf.ctx, short_only, both ? 
&buf : NULL); file->f_pos = buf.ctx.pos; } inode_unlock_shared(inode); if (ret >= 0) ret = buf.result; return ret; } static long fat_dir_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); struct __fat_dirent __user *d1 = (struct __fat_dirent __user *)arg; int short_only, both; switch (cmd) { case VFAT_IOCTL_READDIR_SHORT: short_only = 1; both = 0; break; case VFAT_IOCTL_READDIR_BOTH: short_only = 0; both = 1; break; default: return fat_generic_ioctl(filp, cmd, arg); } /* * Yes, we don't need this put_user() absolutely. However old * code didn't return the right value. So, app use this value, * in order to check whether it is EOF. */ if (put_user(0, &d1->d_reclen)) return -EFAULT; return fat_ioctl_readdir(inode, filp, d1, fat_ioctl_filldir, short_only, both); } #ifdef CONFIG_COMPAT #define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct compat_dirent[2]) #define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct compat_dirent[2]) FAT_IOCTL_FILLDIR_FUNC(fat_compat_ioctl_filldir, compat_dirent) static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd, unsigned long arg) { struct inode *inode = file_inode(filp); struct compat_dirent __user *d1 = compat_ptr(arg); int short_only, both; switch (cmd) { case VFAT_IOCTL_READDIR_SHORT32: short_only = 1; both = 0; break; case VFAT_IOCTL_READDIR_BOTH32: short_only = 0; both = 1; break; default: return fat_generic_ioctl(filp, cmd, (unsigned long)arg); } /* * Yes, we don't need this put_user() absolutely. However old * code didn't return the right value. So, app use this value, * in order to check whether it is EOF. */ if (put_user(0, &d1->d_reclen)) return -EFAULT; return fat_ioctl_readdir(inode, filp, d1, fat_compat_ioctl_filldir, short_only, both); } #endif /* CONFIG_COMPAT */ const struct file_operations fat_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = fat_readdir, .unlocked_ioctl = fat_dir_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = fat_compat_dir_ioctl, #endif .fsync = fat_file_fsync, }; static int fat_get_short_entry(struct inode *dir, loff_t *pos, struct buffer_head **bh, struct msdos_dir_entry **de) { while (fat_get_entry(dir, pos, bh, de) >= 0) { /* free entry or long name entry or volume label */ if (!IS_FREE((*de)->name) && !((*de)->attr & ATTR_VOLUME)) return 0; } return -ENOENT; } /* * The ".." entry can not provide the "struct fat_slot_info" information * for inode, nor a usable i_pos. So, this function provides some information * only. * * Since this function walks through the on-disk inodes within a directory, * callers are responsible for taking any locks necessary to prevent the * directory from changing. */ int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh, struct msdos_dir_entry **de) { loff_t offset = 0; *de = NULL; while (fat_get_short_entry(dir, &offset, bh, de) >= 0) { if (!strncmp((*de)->name, MSDOS_DOTDOT, MSDOS_NAME)) return 0; } return -ENOENT; } EXPORT_SYMBOL_GPL(fat_get_dotdot_entry); /* See if directory is empty */ int fat_dir_empty(struct inode *dir) { struct buffer_head *bh; struct msdos_dir_entry *de; loff_t cpos; int result = 0; bh = NULL; cpos = 0; while (fat_get_short_entry(dir, &cpos, &bh, &de) >= 0) { if (strncmp(de->name, MSDOS_DOT , MSDOS_NAME) && strncmp(de->name, MSDOS_DOTDOT, MSDOS_NAME)) { result = -ENOTEMPTY; break; } } brelse(bh); return result; } EXPORT_SYMBOL_GPL(fat_dir_empty); /* * fat_subdirs counts the number of sub-directories of dir. It can be run * on directories being created. 
*/ int fat_subdirs(struct inode *dir) { struct buffer_head *bh; struct msdos_dir_entry *de; loff_t cpos; int count = 0; bh = NULL; cpos = 0; while (fat_get_short_entry(dir, &cpos, &bh, &de) >= 0) { if (de->attr & ATTR_DIR) count++; } brelse(bh); return count; } /* * Scans a directory for a given file (name points to its formatted name). * Returns an error code or zero. */ int fat_scan(struct inode *dir, const unsigned char *name, struct fat_slot_info *sinfo) { struct super_block *sb = dir->i_sb; sinfo->slot_off = 0; sinfo->bh = NULL; while (fat_get_short_entry(dir, &sinfo->slot_off, &sinfo->bh, &sinfo->de) >= 0) { if (!strncmp(sinfo->de->name, name, MSDOS_NAME)) { sinfo->slot_off -= sizeof(*sinfo->de); sinfo->nr_slots = 1; sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de); return 0; } } return -ENOENT; } EXPORT_SYMBOL_GPL(fat_scan); /* * Scans a directory for a given logstart. * Returns an error code or zero. */ int fat_scan_logstart(struct inode *dir, int i_logstart, struct fat_slot_info *sinfo) { struct super_block *sb = dir->i_sb; sinfo->slot_off = 0; sinfo->bh = NULL; while (fat_get_short_entry(dir, &sinfo->slot_off, &sinfo->bh, &sinfo->de) >= 0) { if (fat_get_start(MSDOS_SB(sb), sinfo->de) == i_logstart) { sinfo->slot_off -= sizeof(*sinfo->de); sinfo->nr_slots = 1; sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de); return 0; } } return -ENOENT; } static int __fat_remove_entries(struct inode *dir, loff_t pos, int nr_slots) { struct super_block *sb = dir->i_sb; struct buffer_head *bh; struct msdos_dir_entry *de, *endp; int err = 0, orig_slots; while (nr_slots) { bh = NULL; if (fat_get_entry(dir, &pos, &bh, &de) < 0) { err = -EIO; break; } orig_slots = nr_slots; endp = (struct msdos_dir_entry *)(bh->b_data + sb->s_blocksize); while (nr_slots && de < endp) { de->name[0] = DELETED_FLAG; de++; nr_slots--; } mark_buffer_dirty_inode(bh, dir); if (IS_DIRSYNC(dir)) err = sync_dirty_buffer(bh); brelse(bh); if (err) break; /* pos is *next* de's position, so this does `- sizeof(de)' */ pos += ((orig_slots - nr_slots) * sizeof(*de)) - sizeof(*de); } return err; } int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo) { struct super_block *sb = dir->i_sb; struct msdos_dir_entry *de; struct buffer_head *bh; int err = 0, nr_slots; /* * First stage: Remove the shortname. By this, the directory * entry is removed. */ nr_slots = sinfo->nr_slots; de = sinfo->de; sinfo->de = NULL; bh = sinfo->bh; sinfo->bh = NULL; while (nr_slots && de >= (struct msdos_dir_entry *)bh->b_data) { de->name[0] = DELETED_FLAG; de--; nr_slots--; } mark_buffer_dirty_inode(bh, dir); if (IS_DIRSYNC(dir)) err = sync_dirty_buffer(bh); brelse(bh); if (err) return err; inode_inc_iversion(dir); if (nr_slots) { /* * Second stage: remove the remaining longname slots. 
* (This directory entry is already removed, and so return * the success) */ err = __fat_remove_entries(dir, sinfo->slot_off, nr_slots); if (err) { fat_msg(sb, KERN_WARNING, "Couldn't remove the long name slots"); } } fat_truncate_time(dir, NULL, S_ATIME|S_MTIME); if (IS_DIRSYNC(dir)) (void)fat_sync_inode(dir); else mark_inode_dirty(dir); return 0; } EXPORT_SYMBOL_GPL(fat_remove_entries); static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used, struct buffer_head **bhs, int nr_bhs) { struct super_block *sb = dir->i_sb; sector_t last_blknr = blknr + MSDOS_SB(sb)->sec_per_clus; int err, i, n; /* Zeroing the unused blocks on this cluster */ blknr += nr_used; n = nr_used; while (blknr < last_blknr) { bhs[n] = sb_getblk(sb, blknr); if (!bhs[n]) { err = -ENOMEM; goto error; } /* Avoid race with userspace read via bdev */ lock_buffer(bhs[n]); memset(bhs[n]->b_data, 0, sb->s_blocksize); set_buffer_uptodate(bhs[n]); unlock_buffer(bhs[n]); mark_buffer_dirty_inode(bhs[n], dir); n++; blknr++; if (n == nr_bhs) { if (IS_DIRSYNC(dir)) { err = fat_sync_bhs(bhs, n); if (err) goto error; } for (i = 0; i < n; i++) brelse(bhs[i]); n = 0; } } if (IS_DIRSYNC(dir)) { err = fat_sync_bhs(bhs, n); if (err) goto error; } for (i = 0; i < n; i++) brelse(bhs[i]); return 0; error: for (i = 0; i < n; i++) bforget(bhs[i]); return err; } int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts) { struct super_block *sb = dir->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bhs[MAX_BUF_PER_PAGE]; struct msdos_dir_entry *de; sector_t blknr; __le16 date, time; u8 time_cs; int err, cluster; err = fat_alloc_clusters(dir, &cluster, 1); if (err) goto error; blknr = fat_clus_to_blknr(sbi, cluster); bhs[0] = sb_getblk(sb, blknr); if (!bhs[0]) { err = -ENOMEM; goto error_free; } fat_time_unix2fat(sbi, ts, &time, &date, &time_cs); de = (struct msdos_dir_entry *)bhs[0]->b_data; /* Avoid race with userspace read via bdev */ lock_buffer(bhs[0]); /* filling the new directory slots ("." and ".." entries) */ memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME); memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME); de->attr = de[1].attr = ATTR_DIR; de[0].lcase = de[1].lcase = 0; de[0].time = de[1].time = time; de[0].date = de[1].date = date; if (sbi->options.isvfat) { /* extra timestamps */ de[0].ctime = de[1].ctime = time; de[0].ctime_cs = de[1].ctime_cs = time_cs; de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = date; } else { de[0].ctime = de[1].ctime = 0; de[0].ctime_cs = de[1].ctime_cs = 0; de[0].adate = de[0].cdate = de[1].adate = de[1].cdate = 0; } fat_set_start(&de[0], cluster); fat_set_start(&de[1], MSDOS_I(dir)->i_logstart); de[0].size = de[1].size = 0; memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de)); set_buffer_uptodate(bhs[0]); unlock_buffer(bhs[0]); mark_buffer_dirty_inode(bhs[0], dir); err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE); if (err) goto error_free; return cluster; error_free: fat_free_clusters(dir, cluster); error: return err; } EXPORT_SYMBOL_GPL(fat_alloc_new_dir); static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots, int *nr_cluster, struct msdos_dir_entry **de, struct buffer_head **bh, loff_t *i_pos) { struct super_block *sb = dir->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bhs[MAX_BUF_PER_PAGE]; sector_t blknr, start_blknr, last_blknr; unsigned long size, copy; int err, i, n, offset, cluster[2]; /* * The minimum cluster size is 512bytes, and maximum entry * size is 32*slots (672bytes). 
So, iff the cluster size is * 512bytes, we may need two clusters. */ size = nr_slots * sizeof(struct msdos_dir_entry); *nr_cluster = (size + (sbi->cluster_size - 1)) >> sbi->cluster_bits; BUG_ON(*nr_cluster > 2); err = fat_alloc_clusters(dir, cluster, *nr_cluster); if (err) goto error; /* * First stage: Fill the directory entry. NOTE: This cluster * is not referenced from any inode yet, so updates order is * not important. */ i = n = copy = 0; do { start_blknr = blknr = fat_clus_to_blknr(sbi, cluster[i]); last_blknr = start_blknr + sbi->sec_per_clus; while (blknr < last_blknr) { bhs[n] = sb_getblk(sb, blknr); if (!bhs[n]) { err = -ENOMEM; goto error_nomem; } /* fill the directory entry */ copy = min(size, sb->s_blocksize); /* Avoid race with userspace read via bdev */ lock_buffer(bhs[n]); memcpy(bhs[n]->b_data, slots, copy); set_buffer_uptodate(bhs[n]); unlock_buffer(bhs[n]); mark_buffer_dirty_inode(bhs[n], dir); slots += copy; size -= copy; if (!size) break; n++; blknr++; } } while (++i < *nr_cluster); memset(bhs[n]->b_data + copy, 0, sb->s_blocksize - copy); offset = copy - sizeof(struct msdos_dir_entry); get_bh(bhs[n]); *bh = bhs[n]; *de = (struct msdos_dir_entry *)((*bh)->b_data + offset); *i_pos = fat_make_i_pos(sb, *bh, *de); /* Second stage: clear the rest of cluster, and write outs */ err = fat_zeroed_cluster(dir, start_blknr, ++n, bhs, MAX_BUF_PER_PAGE); if (err) goto error_free; return cluster[0]; error_free: brelse(*bh); *bh = NULL; n = 0; error_nomem: for (i = 0; i < n; i++) bforget(bhs[i]); fat_free_clusters(dir, cluster[0]); error: return err; } int fat_add_entries(struct inode *dir, void *slots, int nr_slots, struct fat_slot_info *sinfo) { struct super_block *sb = dir->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */ struct msdos_dir_entry *de; int err, free_slots, i, nr_bhs; loff_t pos, i_pos; sinfo->nr_slots = nr_slots; /* First stage: search free directory entries */ free_slots = nr_bhs = 0; bh = prev = NULL; pos = 0; err = -ENOSPC; while (fat_get_entry(dir, &pos, &bh, &de) > -1) { /* check the maximum size of directory */ if (pos >= FAT_MAX_DIR_SIZE) goto error; if (IS_FREE(de->name)) { if (prev != bh) { get_bh(bh); bhs[nr_bhs] = prev = bh; nr_bhs++; } free_slots++; if (free_slots == nr_slots) goto found; } else { for (i = 0; i < nr_bhs; i++) brelse(bhs[i]); prev = NULL; free_slots = nr_bhs = 0; } } if (dir->i_ino == MSDOS_ROOT_INO) { if (!is_fat32(sbi)) goto error; } else if (MSDOS_I(dir)->i_start == 0) { fat_msg(sb, KERN_ERR, "Corrupted directory (i_pos %lld)", MSDOS_I(dir)->i_pos); err = -EIO; goto error; } found: err = 0; pos -= free_slots * sizeof(*de); nr_slots -= free_slots; if (free_slots) { /* * Second stage: filling the free entries with new entries. * NOTE: If this slots has shortname, first, we write * the long name slots, then write the short name. */ int size = free_slots * sizeof(*de); int offset = pos & (sb->s_blocksize - 1); int long_bhs = nr_bhs - (nr_slots == 0); /* Fill the long name slots. */ for (i = 0; i < long_bhs; i++) { int copy = min_t(int, sb->s_blocksize - offset, size); memcpy(bhs[i]->b_data + offset, slots, copy); mark_buffer_dirty_inode(bhs[i], dir); offset = 0; slots += copy; size -= copy; } if (long_bhs && IS_DIRSYNC(dir)) err = fat_sync_bhs(bhs, long_bhs); if (!err && i < nr_bhs) { /* Fill the short name slot. 
*/ int copy = min_t(int, sb->s_blocksize - offset, size); memcpy(bhs[i]->b_data + offset, slots, copy); mark_buffer_dirty_inode(bhs[i], dir); if (IS_DIRSYNC(dir)) err = sync_dirty_buffer(bhs[i]); } for (i = 0; i < nr_bhs; i++) brelse(bhs[i]); if (err) goto error_remove; } if (nr_slots) { int cluster, nr_cluster; /* * Third stage: allocate the cluster for new entries. * And initialize the cluster with new entries, then * add the cluster to dir. */ cluster = fat_add_new_entries(dir, slots, nr_slots, &nr_cluster, &de, &bh, &i_pos); if (cluster < 0) { err = cluster; goto error_remove; } err = fat_chain_add(dir, cluster, nr_cluster); if (err) { fat_free_clusters(dir, cluster); goto error_remove; } if (dir->i_size & (sbi->cluster_size - 1)) { fat_fs_error(sb, "Odd directory size"); dir->i_size = (dir->i_size + sbi->cluster_size - 1) & ~((loff_t)sbi->cluster_size - 1); } dir->i_size += nr_cluster << sbi->cluster_bits; MSDOS_I(dir)->mmu_private += nr_cluster << sbi->cluster_bits; } sinfo->slot_off = pos; sinfo->de = de; sinfo->bh = bh; sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de); return 0; error: brelse(bh); for (i = 0; i < nr_bhs; i++) brelse(bhs[i]); return err; error_remove: brelse(bh); if (free_slots) __fat_remove_entries(dir, pos, free_slots); return err; } EXPORT_SYMBOL_GPL(fat_add_entries);
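/*
 * Illustrative sketch (not part of dir.c): fat_parse_long() above ties each
 * long-name slot to its 8.3 entry by comparing the slots' alias_checksum
 * against fat_checksum() of the short name. That checksum is the classic
 * VFAT rotate-right-and-add over the 11-byte short name; a standalone
 * version, assuming only <stdint.h>:
 */
#include <stdint.h>

static uint8_t vfat_alias_checksum(const uint8_t name[11])
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 11; i++)
		/* rotate sum right by one bit, then add the next name byte */
		sum = ((sum & 1) << 7) + (sum >> 1) + name[i];
	return sum;
}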
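/*
 * Illustrative userspace sketch of the VFAT_IOCTL_READDIR_BOTH interface
 * served by fat_dir_ioctl()/fat_ioctl_filldir() above: each call fills an
 * array of two __fat_dirent entries, short name in [0] and, when present,
 * long name in [1]; d_reclen == 0 in [0] marks end of directory. The mount
 * point path is an assumption and error handling is minimal.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msdos_fs.h>

int main(void)
{
	struct __fat_dirent de[2];
	int fd = open("/mnt/vfat", O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return 1;
	while (ioctl(fd, VFAT_IOCTL_READDIR_BOTH, de) >= 0 && de[0].d_reclen)
		printf("short=\"%s\" long=\"%s\"\n", de[0].d_name,
		       de[1].d_reclen ? de[1].d_name : de[0].d_name);
	close(fd);
	return 0;
}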
// SPDX-License-Identifier: GPL-2.0-or-later /* AFS volume management * * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/kernel.h> #include <linux/slab.h> #include "internal.h" static unsigned __read_mostly afs_volume_record_life = 60 * 60; static void afs_destroy_volume(struct work_struct *work); /* * Insert a volume into a cell. If there's an existing volume record, that is * returned instead with a ref held.
*/ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell, struct afs_volume *volume) { struct afs_volume *p; struct rb_node *parent = NULL, **pp; write_seqlock(&cell->volume_lock); pp = &cell->volumes.rb_node; while (*pp) { parent = *pp; p = rb_entry(parent, struct afs_volume, cell_node); if (p->vid < volume->vid) { pp = &(*pp)->rb_left; } else if (p->vid > volume->vid) { pp = &(*pp)->rb_right; } else { if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) { volume = p; goto found; } set_bit(AFS_VOLUME_RM_TREE, &volume->flags); rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes); } } rb_link_node_rcu(&volume->cell_node, parent, pp); rb_insert_color(&volume->cell_node, &cell->volumes); hlist_add_head_rcu(&volume->proc_link, &cell->proc_volumes); found: write_sequnlock(&cell->volume_lock); return volume; } static void afs_remove_volume_from_cell(struct afs_volume *volume) { struct afs_cell *cell = volume->cell; if (!hlist_unhashed(&volume->proc_link)) { trace_afs_volume(volume->vid, refcount_read(&cell->ref), afs_volume_trace_remove); write_seqlock(&cell->volume_lock); hlist_del_rcu(&volume->proc_link); if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags)) rb_erase(&volume->cell_node, &cell->volumes); write_sequnlock(&cell->volume_lock); } } /* * Allocate a volume record and load it up from a vldb record. */ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, struct afs_vldb_entry *vldb, struct afs_server_list **_slist) { struct afs_server_list *slist; struct afs_volume *volume; int ret = -ENOMEM, i; volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL); if (!volume) goto error_0; volume->vid = vldb->vid[params->type]; volume->update_at = ktime_get_real_seconds() + afs_volume_record_life; volume->cell = afs_get_cell(params->cell, afs_cell_trace_get_vol); volume->type = params->type; volume->type_force = params->force; volume->name_len = vldb->name_len; volume->creation_time = TIME64_MIN; volume->update_time = TIME64_MIN; refcount_set(&volume->ref, 1); INIT_HLIST_NODE(&volume->proc_link); INIT_WORK(&volume->destructor, afs_destroy_volume); rwlock_init(&volume->servers_lock); mutex_init(&volume->volsync_lock); mutex_init(&volume->cb_check_lock); rwlock_init(&volume->cb_v_break_lock); INIT_LIST_HEAD(&volume->open_mmaps); init_rwsem(&volume->open_mmaps_lock); memcpy(volume->name, vldb->name, vldb->name_len + 1); for (i = 0; i < AFS_MAXTYPES; i++) volume->vids[i] = vldb->vid[i]; slist = afs_alloc_server_list(volume, params->key, vldb); if (IS_ERR(slist)) { ret = PTR_ERR(slist); goto error_1; } *_slist = slist; rcu_assign_pointer(volume->servers, slist); trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc); return volume; error_1: afs_put_cell(volume->cell, afs_cell_trace_put_vol); kfree(volume); error_0: return ERR_PTR(ret); } /* * Look up or allocate a volume record. */ static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params, struct afs_vldb_entry *vldb) { struct afs_server_list *slist; struct afs_volume *candidate, *volume; candidate = afs_alloc_volume(params, vldb, &slist); if (IS_ERR(candidate)) return candidate; volume = afs_insert_volume_into_cell(params->cell, candidate); if (volume == candidate) afs_attach_volume_to_servers(volume, slist); else afs_put_volume(candidate, afs_volume_trace_put_cell_dup); return volume; } /* * Look up a VLDB record for a volume. 
*/ static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell, struct key *key, const char *volname, size_t volnamesz) { struct afs_vldb_entry *vldb = ERR_PTR(-EDESTADDRREQ); struct afs_vl_cursor vc; int ret; if (!afs_begin_vlserver_operation(&vc, cell, key)) return ERR_PTR(-ERESTARTSYS); while (afs_select_vlserver(&vc)) { vldb = afs_vl_get_entry_by_name_u(&vc, volname, volnamesz); } ret = afs_end_vlserver_operation(&vc); return ret < 0 ? ERR_PTR(ret) : vldb; } /* * Look up a volume in the VL server and create a candidate volume record for * it. * * The volume name can be one of the following: * "%[cell:]volume[.]" R/W volume * "#[cell:]volume[.]" R/O or R/W volume (rwparent=0), * or R/W (rwparent=1) volume * "%[cell:]volume.readonly" R/O volume * "#[cell:]volume.readonly" R/O volume * "%[cell:]volume.backup" Backup volume * "#[cell:]volume.backup" Backup volume * * The cell name is optional, and defaults to the current cell. * * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin * Guide * - Rule 1: Explicit type suffix forces access of that type or nothing * (no suffix, then use Rule 2 & 3) * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W * if not available * - Rule 3: If parent volume is R/W, then only mount R/W volume unless * explicitly told otherwise */ struct afs_volume *afs_create_volume(struct afs_fs_context *params) { struct afs_vldb_entry *vldb; struct afs_volume *volume; unsigned long type_mask = 1UL << params->type; vldb = afs_vl_lookup_vldb(params->cell, params->key, params->volname, params->volnamesz); if (IS_ERR(vldb)) return ERR_CAST(vldb); if (test_bit(AFS_VLDB_QUERY_ERROR, &vldb->flags)) { volume = ERR_PTR(vldb->error); goto error; } /* Make the final decision on the type we want */ volume = ERR_PTR(-ENOMEDIUM); if (params->force) { if (!(vldb->flags & type_mask)) goto error; } else if (test_bit(AFS_VLDB_HAS_RO, &vldb->flags)) { params->type = AFSVL_ROVOL; } else if (test_bit(AFS_VLDB_HAS_RW, &vldb->flags)) { params->type = AFSVL_RWVOL; } else { goto error; } volume = afs_lookup_volume(params, vldb); error: kfree(vldb); return volume; } /* * Destroy a volume record */ static void afs_destroy_volume(struct work_struct *work) { struct afs_volume *volume = container_of(work, struct afs_volume, destructor); struct afs_server_list *slist = rcu_access_pointer(volume->servers); _enter("%p", volume); #ifdef CONFIG_AFS_FSCACHE ASSERTCMP(volume->cache, ==, NULL); #endif afs_detach_volume_from_servers(volume, slist); afs_remove_volume_from_cell(volume); afs_put_serverlist(volume->cell->net, slist); afs_put_cell(volume->cell, afs_cell_trace_put_vol); trace_afs_volume(volume->vid, refcount_read(&volume->ref), afs_volume_trace_free); kfree_rcu(volume, rcu); _leave(" [destroyed]"); } /* * Try to get a reference on a volume record. */ bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason) { int r; if (__refcount_inc_not_zero(&volume->ref, &r)) { trace_afs_volume(volume->vid, r + 1, reason); return true; } return false; } /* * Get a reference on a volume record. */ struct afs_volume *afs_get_volume(struct afs_volume *volume, enum afs_volume_trace reason) { if (volume) { int r; __refcount_inc(&volume->ref, &r); trace_afs_volume(volume->vid, r + 1, reason); } return volume; } /* * Drop a reference on a volume record. 
*/ void afs_put_volume(struct afs_volume *volume, enum afs_volume_trace reason) { if (volume) { afs_volid_t vid = volume->vid; bool zero; int r; zero = __refcount_dec_and_test(&volume->ref, &r); trace_afs_volume(vid, r - 1, reason); if (zero) schedule_work(&volume->destructor); } } /* * Activate a volume. */ int afs_activate_volume(struct afs_volume *volume) { #ifdef CONFIG_AFS_FSCACHE struct fscache_volume *vcookie; char *name; name = kasprintf(GFP_KERNEL, "afs,%s,%llx", volume->cell->name, volume->vid); if (!name) return -ENOMEM; vcookie = fscache_acquire_volume(name, NULL, NULL, 0); if (IS_ERR(vcookie)) { if (vcookie != ERR_PTR(-EBUSY)) { kfree(name); return PTR_ERR(vcookie); } pr_err("AFS: Cache volume key already in use (%s)\n", name); vcookie = NULL; } volume->cache = vcookie; kfree(name); #endif return 0; } /* * Deactivate a volume. */ void afs_deactivate_volume(struct afs_volume *volume) { _enter("%s", volume->name); #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_volume(volume->cache, NULL, test_bit(AFS_VOLUME_DELETED, &volume->flags)); volume->cache = NULL; #endif _leave(""); } /* * Query the VL service to update the volume status. */ static int afs_update_volume_status(struct afs_volume *volume, struct key *key) { struct afs_server_list *new, *old, *discard; struct afs_vldb_entry *vldb; char idbuf[24]; int ret, idsz; _enter(""); /* We look up an ID by passing it as a decimal string in the * operation's name parameter. */ idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid); vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz); if (IS_ERR(vldb)) { ret = PTR_ERR(vldb); goto error; } /* See if the volume got renamed. */ if (vldb->name_len != volume->name_len || memcmp(vldb->name, volume->name, vldb->name_len) != 0) { /* TODO: Use RCU'd string. */ memcpy(volume->name, vldb->name, AFS_MAXVOLNAME); volume->name_len = vldb->name_len; } /* See if the volume's server list got updated. */ new = afs_alloc_server_list(volume, key, vldb); if (IS_ERR(new)) { ret = PTR_ERR(new); goto error_vldb; } write_lock(&volume->servers_lock); discard = new; old = rcu_dereference_protected(volume->servers, lockdep_is_held(&volume->servers_lock)); if (afs_annotate_server_list(new, old)) { new->seq = volume->servers_seq + 1; rcu_assign_pointer(volume->servers, new); smp_wmb(); volume->servers_seq++; discard = old; } /* Check more often if replication is ongoing. */ if (new->ro_replicating) volume->update_at = ktime_get_real_seconds() + 10 * 60; else volume->update_at = ktime_get_real_seconds() + afs_volume_record_life; write_unlock(&volume->servers_lock); if (discard == old) afs_reattach_volume_to_servers(volume, new, old); afs_put_serverlist(volume->cell->net, discard); ret = 0; error_vldb: kfree(vldb); error: _leave(" = %d", ret); return ret; } /* * Make sure the volume record is up to date. 
*/ int afs_check_volume_status(struct afs_volume *volume, struct afs_operation *op) { int ret, retries = 0; _enter(""); retry: if (test_bit(AFS_VOLUME_WAIT, &volume->flags)) goto wait; if (volume->update_at <= ktime_get_real_seconds() || test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags)) goto update; _leave(" = 0"); return 0; update: if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) { clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); ret = afs_update_volume_status(volume, op->key); if (ret < 0) set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags); clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags); wake_up_bit(&volume->flags, AFS_VOLUME_WAIT); _leave(" = %d", ret); return ret; } wait: if (!test_bit(AFS_VOLUME_WAIT, &volume->flags)) { _leave(" = 0 [no wait]"); return 0; } ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, (op->flags & AFS_OPERATION_UNINTR) ? TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); if (ret == -ERESTARTSYS) { _leave(" = %d", ret); return ret; } retries++; if (retries == 4) { _leave(" = -ESTALE"); return -ESTALE; } goto retry; }
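/*
 * Illustrative sketch of the volume naming rules documented above; the
 * kernel parses the mount source elsewhere, so the helper below is
 * hypothetical. Rule 1: an explicit ".readonly" or ".backup" suffix forces
 * that volume type; with no suffix, Rules 2 and 3 choose R/O or R/W from the
 * parent volume, which is what the unforced branch of afs_create_volume()
 * above does via AFS_VLDB_HAS_RO/AFS_VLDB_HAS_RW.
 */
#include <stdbool.h>
#include <string.h>

enum example_vol_type { EXAMPLE_VOL_RW, EXAMPLE_VOL_RO, EXAMPLE_VOL_BACKUP };

static bool example_suffix_forces_type(const char *volname,
				       enum example_vol_type *type)
{
	size_t len = strlen(volname);

	if (len > 9 && !strcmp(volname + len - 9, ".readonly")) {
		*type = EXAMPLE_VOL_RO;
		return true;
	}
	if (len > 7 && !strcmp(volname + len - 7, ".backup")) {
		*type = EXAMPLE_VOL_BACKUP;
		return true;
	}
	return false;	/* no forcing suffix: fall back to Rules 2 and 3 */
}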
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_EXTEND_H
#define _NF_CONNTRACK_EXTEND_H

#include <linux/slab.h>

#include <net/netfilter/nf_conntrack.h>

enum nf_ct_ext_id {
	NF_CT_EXT_HELPER,
#if IS_ENABLED(CONFIG_NF_NAT)
	NF_CT_EXT_NAT,
#endif
	NF_CT_EXT_SEQADJ,
	NF_CT_EXT_ACCT,
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	NF_CT_EXT_ECACHE,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	NF_CT_EXT_TSTAMP,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
	NF_CT_EXT_TIMEOUT,
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
	NF_CT_EXT_LABELS,
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
	NF_CT_EXT_SYNPROXY,
#endif
#if IS_ENABLED(CONFIG_NET_ACT_CT)
	NF_CT_EXT_ACT_CT,
#endif
	NF_CT_EXT_NUM,
};

/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
	u8 offset[NF_CT_EXT_NUM];
	u8 len;
	unsigned int gen_id;
	char data[] __aligned(8);
};

static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
{
	return !!ext->offset[id];
}

static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
{
	return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
}

void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id);

static inline void *nf_ct_ext_find(const struct nf_conn *ct, u8 id)
{
	struct nf_ct_ext *ext = ct->ext;

	if (!ext || !__nf_ct_ext_exist(ext, id))
		return NULL;

	if (unlikely(ext->gen_id))
		return __nf_ct_ext_find(ext, id);

	return (void *)ct->ext + ct->ext->offset[id];
}

/* Add this type, returns pointer to data or NULL. */
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);

/* ext genid. if ext->id != ext_genid, extensions cannot be used
 * anymore unless conntrack has CONFIRMED bit set.
 */
extern atomic_t nf_conntrack_ext_genid;
void nf_ct_ext_bump_genid(void);

#endif /* _NF_CONNTRACK_EXTEND_H */
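/*
 * Illustrative sketch of the extension API above, modelled on the accounting
 * extension; the wrapper names are hypothetical. nf_ct_ext_add() attaches an
 * extension to a conntrack and returns its data area, while nf_ct_ext_find()
 * later resolves it through the per-id offset table in struct nf_ct_ext.
 */
#include <net/netfilter/nf_conntrack_acct.h>

static struct nf_conn_acct *example_acct_add(struct nf_conn *ct, gfp_t gfp)
{
	/* Extensions should only be added before the conntrack is confirmed. */
	return nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp);
}

static struct nf_conn_acct *example_acct_find(const struct nf_conn *ct)
{
	/* NULL when accounting was never attached to this conntrack. */
	return nf_ct_ext_find(ct, NF_CT_EXT_ACCT);
}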
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM sched #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SCHED_H #include <linux/kthread.h> #include <linux/sched/numa_balancing.h> #include <linux/tracepoint.h> #include <linux/binfmts.h> /* * Tracepoint 
for calling kthread_stop, performed to end a kthread: */ TRACE_EVENT(sched_kthread_stop, TP_PROTO(struct task_struct *t), TP_ARGS(t), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) ), TP_fast_assign( memcpy(__entry->comm, t->comm, TASK_COMM_LEN); __entry->pid = t->pid; ), TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) ); /* * Tracepoint for the return value of the kthread stopping: */ TRACE_EVENT(sched_kthread_stop_ret, TP_PROTO(int ret), TP_ARGS(ret), TP_STRUCT__entry( __field( int, ret ) ), TP_fast_assign( __entry->ret = ret; ), TP_printk("ret=%d", __entry->ret) ); /** * sched_kthread_work_queue_work - called when a work gets queued * @worker: pointer to the kthread_worker * @work: pointer to struct kthread_work * * This event occurs when a work is queued immediately or once a * delayed work is actually queued (ie: once the delay has been * reached). */ TRACE_EVENT(sched_kthread_work_queue_work, TP_PROTO(struct kthread_worker *worker, struct kthread_work *work), TP_ARGS(worker, work), TP_STRUCT__entry( __field( void *, work ) __field( void *, function) __field( void *, worker) ), TP_fast_assign( __entry->work = work; __entry->function = work->func; __entry->worker = worker; ), TP_printk("work struct=%p function=%ps worker=%p", __entry->work, __entry->function, __entry->worker) ); /** * sched_kthread_work_execute_start - called immediately before the work callback * @work: pointer to struct kthread_work * * Allows to track kthread work execution. */ TRACE_EVENT(sched_kthread_work_execute_start, TP_PROTO(struct kthread_work *work), TP_ARGS(work), TP_STRUCT__entry( __field( void *, work ) __field( void *, function) ), TP_fast_assign( __entry->work = work; __entry->function = work->func; ), TP_printk("work struct %p: function %ps", __entry->work, __entry->function) ); /** * sched_kthread_work_execute_end - called immediately after the work callback * @work: pointer to struct work_struct * @function: pointer to worker function * * Allows to track workqueue execution. */ TRACE_EVENT(sched_kthread_work_execute_end, TP_PROTO(struct kthread_work *work, kthread_work_func_t function), TP_ARGS(work, function), TP_STRUCT__entry( __field( void *, work ) __field( void *, function) ), TP_fast_assign( __entry->work = work; __entry->function = function; ), TP_printk("work struct %p: function %ps", __entry->work, __entry->function) ); /* * Tracepoint for waking up a task: */ DECLARE_EVENT_CLASS(sched_wakeup_template, TP_PROTO(struct task_struct *p), TP_ARGS(__perf_task(p)), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, prio ) __field( int, target_cpu ) ), TP_fast_assign( memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ __entry->target_cpu = task_cpu(p); ), TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d", __entry->comm, __entry->pid, __entry->prio, __entry->target_cpu) ); /* * Tracepoint called when waking a task; this tracepoint is guaranteed to be * called from the waking context. */ DEFINE_EVENT(sched_wakeup_template, sched_waking, TP_PROTO(struct task_struct *p), TP_ARGS(p)); /* * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING. * It is not always called from the waking context. 
*/ DEFINE_EVENT(sched_wakeup_template, sched_wakeup, TP_PROTO(struct task_struct *p), TP_ARGS(p)); /* * Tracepoint for waking up a new task: */ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, TP_PROTO(struct task_struct *p), TP_ARGS(p)); #ifdef CREATE_TRACE_POINTS static inline long __trace_sched_switch_state(bool preempt, unsigned int prev_state, struct task_struct *p) { unsigned int state; #ifdef CONFIG_SCHED_DEBUG BUG_ON(p != current); #endif /* CONFIG_SCHED_DEBUG */ /* * Preemption ignores task state, therefore preempted tasks are always * RUNNING (we will not have dequeued if state != RUNNING). */ if (preempt) return TASK_REPORT_MAX; /* * task_state_index() uses fls() and returns a value from 0-8 range. * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using * it for left shift operation to get the correct task->state * mapping. */ state = __task_state_index(prev_state, p->exit_state); return state ? (1 << (state - 1)) : state; } #endif /* CREATE_TRACE_POINTS */ /* * Tracepoint for task switches, performed by the scheduler: */ TRACE_EVENT(sched_switch, TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next, unsigned int prev_state), TP_ARGS(preempt, prev, next, prev_state), TP_STRUCT__entry( __array( char, prev_comm, TASK_COMM_LEN ) __field( pid_t, prev_pid ) __field( int, prev_prio ) __field( long, prev_state ) __array( char, next_comm, TASK_COMM_LEN ) __field( pid_t, next_pid ) __field( int, next_prio ) ), TP_fast_assign( memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); __entry->prev_pid = prev->pid; __entry->prev_prio = prev->prio; __entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev); memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); __entry->next_pid = next->pid; __entry->next_prio = next->prio; /* XXX SCHED_DEADLINE */ ), TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, (__entry->prev_state & (TASK_REPORT_MAX - 1)) ? __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|", { TASK_INTERRUPTIBLE, "S" }, { TASK_UNINTERRUPTIBLE, "D" }, { __TASK_STOPPED, "T" }, { __TASK_TRACED, "t" }, { EXIT_DEAD, "X" }, { EXIT_ZOMBIE, "Z" }, { TASK_PARKED, "P" }, { TASK_DEAD, "I" }) : "R", __entry->prev_state & TASK_REPORT_MAX ? 
"+" : "", __entry->next_comm, __entry->next_pid, __entry->next_prio) ); /* * Tracepoint for a task being migrated: */ TRACE_EVENT(sched_migrate_task, TP_PROTO(struct task_struct *p, int dest_cpu), TP_ARGS(p, dest_cpu), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, prio ) __field( int, orig_cpu ) __field( int, dest_cpu ) ), TP_fast_assign( memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ __entry->orig_cpu = task_cpu(p); __entry->dest_cpu = dest_cpu; ), TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d", __entry->comm, __entry->pid, __entry->prio, __entry->orig_cpu, __entry->dest_cpu) ); DECLARE_EVENT_CLASS(sched_process_template, TP_PROTO(struct task_struct *p), TP_ARGS(p), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, prio ) ), TP_fast_assign( memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ ), TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); /* * Tracepoint for freeing a task: */ DEFINE_EVENT(sched_process_template, sched_process_free, TP_PROTO(struct task_struct *p), TP_ARGS(p)); /* * Tracepoint for a task exiting: */ DEFINE_EVENT(sched_process_template, sched_process_exit, TP_PROTO(struct task_struct *p), TP_ARGS(p)); /* * Tracepoint for waiting on task to unschedule: */ DEFINE_EVENT(sched_process_template, sched_wait_task, TP_PROTO(struct task_struct *p), TP_ARGS(p)); /* * Tracepoint for a waiting task: */ TRACE_EVENT(sched_process_wait, TP_PROTO(struct pid *pid), TP_ARGS(pid), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, prio ) ), TP_fast_assign( memcpy(__entry->comm, current->comm, TASK_COMM_LEN); __entry->pid = pid_nr(pid); __entry->prio = current->prio; /* XXX SCHED_DEADLINE */ ), TP_printk("comm=%s pid=%d prio=%d", __entry->comm, __entry->pid, __entry->prio) ); /* * Tracepoint for kernel_clone: */ TRACE_EVENT(sched_process_fork, TP_PROTO(struct task_struct *parent, struct task_struct *child), TP_ARGS(parent, child), TP_STRUCT__entry( __array( char, parent_comm, TASK_COMM_LEN ) __field( pid_t, parent_pid ) __array( char, child_comm, TASK_COMM_LEN ) __field( pid_t, child_pid ) ), TP_fast_assign( memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN); __entry->parent_pid = parent->pid; memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN); __entry->child_pid = child->pid; ), TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d", __entry->parent_comm, __entry->parent_pid, __entry->child_comm, __entry->child_pid) ); /* * Tracepoint for exec: */ TRACE_EVENT(sched_process_exec, TP_PROTO(struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm), TP_ARGS(p, old_pid, bprm), TP_STRUCT__entry( __string( filename, bprm->filename ) __field( pid_t, pid ) __field( pid_t, old_pid ) ), TP_fast_assign( __assign_str(filename); __entry->pid = p->pid; __entry->old_pid = old_pid; ), TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename), __entry->pid, __entry->old_pid) ); /** * sched_prepare_exec - called before setting up new exec * @task: pointer to the current task * @bprm: pointer to linux_binprm used for new exec * * Called before flushing the old exec, where @task is still unchanged, but at * the point of no return during switching to the new exec. At the point it is * called the exec will either succeed, or on failure terminate the task. 
Also * see the "sched_process_exec" tracepoint, which is called right after @task * has successfully switched to the new exec. */ TRACE_EVENT(sched_prepare_exec, TP_PROTO(struct task_struct *task, struct linux_binprm *bprm), TP_ARGS(task, bprm), TP_STRUCT__entry( __string( interp, bprm->interp ) __string( filename, bprm->filename ) __field( pid_t, pid ) __string( comm, task->comm ) ), TP_fast_assign( __assign_str(interp); __assign_str(filename); __entry->pid = task->pid; __assign_str(comm); ), TP_printk("interp=%s filename=%s pid=%d comm=%s", __get_str(interp), __get_str(filename), __entry->pid, __get_str(comm)) ); #ifdef CONFIG_SCHEDSTATS #define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT #define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS #else #define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP #define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP #endif /* * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE * adding sched_stat support to SCHED_FIFO/RR would be welcome. */ DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(__perf_task(tsk), __perf_count(delay)), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( u64, delay ) ), TP_fast_assign( memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid; __entry->delay = delay; ), TP_printk("comm=%s pid=%d delay=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->delay) ); /* * Tracepoint for accounting wait time (time the task is runnable * but not actually running due to scheduler contention). */ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)); /* * Tracepoint for accounting sleep time (time the task is not runnable, * including iowait, see below). */ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)); /* * Tracepoint for accounting iowait time (time the task is not runnable * due to waiting on IO to complete). */ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)); /* * Tracepoint for accounting blocked time (time the task is in uninterruptible). */ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)); /* * Tracepoint for accounting runtime (time the task is executing * on a CPU). */ DECLARE_EVENT_CLASS(sched_stat_runtime, TP_PROTO(struct task_struct *tsk, u64 runtime), TP_ARGS(tsk, __perf_count(runtime)), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( u64, runtime ) ), TP_fast_assign( memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid; __entry->runtime = runtime; ), TP_printk("comm=%s pid=%d runtime=%Lu [ns]", __entry->comm, __entry->pid, (unsigned long long)__entry->runtime) ); DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime, TP_PROTO(struct task_struct *tsk, u64 runtime), TP_ARGS(tsk, runtime)); /* * Tracepoint for showing priority inheritance modifying a tasks * priority. 
*/ TRACE_EVENT(sched_pi_setprio, TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task), TP_ARGS(tsk, pi_task), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) __field( int, oldprio ) __field( int, newprio ) ), TP_fast_assign( memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid; __entry->oldprio = tsk->prio; __entry->newprio = pi_task ? min(tsk->normal_prio, pi_task->prio) : tsk->normal_prio; /* XXX SCHED_DEADLINE bits missing */ ), TP_printk("comm=%s pid=%d oldprio=%d newprio=%d", __entry->comm, __entry->pid, __entry->oldprio, __entry->newprio) ); #ifdef CONFIG_DETECT_HUNG_TASK TRACE_EVENT(sched_process_hang, TP_PROTO(struct task_struct *tsk), TP_ARGS(tsk), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) __field( pid_t, pid ) ), TP_fast_assign( memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid; ), TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid) ); #endif /* CONFIG_DETECT_HUNG_TASK */ /* * Tracks migration of tasks from one runqueue to another. Can be used to * detect if automatic NUMA balancing is bouncing between nodes. */ TRACE_EVENT(sched_move_numa, TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu), TP_ARGS(tsk, src_cpu, dst_cpu), TP_STRUCT__entry( __field( pid_t, pid ) __field( pid_t, tgid ) __field( pid_t, ngid ) __field( int, src_cpu ) __field( int, src_nid ) __field( int, dst_cpu ) __field( int, dst_nid ) ), TP_fast_assign( __entry->pid = task_pid_nr(tsk); __entry->tgid = task_tgid_nr(tsk); __entry->ngid = task_numa_group_id(tsk); __entry->src_cpu = src_cpu; __entry->src_nid = cpu_to_node(src_cpu); __entry->dst_cpu = dst_cpu; __entry->dst_nid = cpu_to_node(dst_cpu); ), TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d", __entry->pid, __entry->tgid, __entry->ngid, __entry->src_cpu, __entry->src_nid, __entry->dst_cpu, __entry->dst_nid) ); DECLARE_EVENT_CLASS(sched_numa_pair_template, TP_PROTO(struct task_struct *src_tsk, int src_cpu, struct task_struct *dst_tsk, int dst_cpu), TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu), TP_STRUCT__entry( __field( pid_t, src_pid ) __field( pid_t, src_tgid ) __field( pid_t, src_ngid ) __field( int, src_cpu ) __field( int, src_nid ) __field( pid_t, dst_pid ) __field( pid_t, dst_tgid ) __field( pid_t, dst_ngid ) __field( int, dst_cpu ) __field( int, dst_nid ) ), TP_fast_assign( __entry->src_pid = task_pid_nr(src_tsk); __entry->src_tgid = task_tgid_nr(src_tsk); __entry->src_ngid = task_numa_group_id(src_tsk); __entry->src_cpu = src_cpu; __entry->src_nid = cpu_to_node(src_cpu); __entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0; __entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0; __entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0; __entry->dst_cpu = dst_cpu; __entry->dst_nid = dst_cpu >= 0 ? 
cpu_to_node(dst_cpu) : -1; ), TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d", __entry->src_pid, __entry->src_tgid, __entry->src_ngid, __entry->src_cpu, __entry->src_nid, __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid, __entry->dst_cpu, __entry->dst_nid) ); DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa, TP_PROTO(struct task_struct *src_tsk, int src_cpu, struct task_struct *dst_tsk, int dst_cpu), TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu) ); DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa, TP_PROTO(struct task_struct *src_tsk, int src_cpu, struct task_struct *dst_tsk, int dst_cpu), TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu) ); #ifdef CONFIG_NUMA_BALANCING #define NUMAB_SKIP_REASON \ EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \ EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \ EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \ EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \ EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \ EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \ EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" ) /* Redefine for export. */ #undef EM #undef EMe #define EM(a, b) TRACE_DEFINE_ENUM(a); #define EMe(a, b) TRACE_DEFINE_ENUM(a); NUMAB_SKIP_REASON /* Redefine for symbolic printing. */ #undef EM #undef EMe #define EM(a, b) { a, b }, #define EMe(a, b) { a, b } TRACE_EVENT(sched_skip_vma_numa, TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma, enum numa_vmaskip_reason reason), TP_ARGS(mm, vma, reason), TP_STRUCT__entry( __field(unsigned long, numa_scan_offset) __field(unsigned long, vm_start) __field(unsigned long, vm_end) __field(enum numa_vmaskip_reason, reason) ), TP_fast_assign( __entry->numa_scan_offset = mm->numa_scan_offset; __entry->vm_start = vma->vm_start; __entry->vm_end = vma->vm_end; __entry->reason = reason; ), TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s", __entry->numa_scan_offset, __entry->vm_start, __entry->vm_end, __print_symbolic(__entry->reason, NUMAB_SKIP_REASON)) ); #endif /* CONFIG_NUMA_BALANCING */ /* * Tracepoint for waking a polling cpu without an IPI. */ TRACE_EVENT(sched_wake_idle_without_ipi, TP_PROTO(int cpu), TP_ARGS(cpu), TP_STRUCT__entry( __field( int, cpu ) ), TP_fast_assign( __entry->cpu = cpu; ), TP_printk("cpu=%d", __entry->cpu) ); /* * Following tracepoints are not exported in tracefs and provide hooking * mechanisms only for testing and debugging purposes. * * Postfixed with _tp to make them easily identifiable in the code. 
*/ DECLARE_TRACE(pelt_cfs_tp, TP_PROTO(struct cfs_rq *cfs_rq), TP_ARGS(cfs_rq)); DECLARE_TRACE(pelt_rt_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); DECLARE_TRACE(pelt_dl_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); DECLARE_TRACE(pelt_hw_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); DECLARE_TRACE(pelt_irq_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); DECLARE_TRACE(pelt_se_tp, TP_PROTO(struct sched_entity *se), TP_ARGS(se)); DECLARE_TRACE(sched_cpu_capacity_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); DECLARE_TRACE(sched_overutilized_tp, TP_PROTO(struct root_domain *rd, bool overutilized), TP_ARGS(rd, overutilized)); DECLARE_TRACE(sched_util_est_cfs_tp, TP_PROTO(struct cfs_rq *cfs_rq), TP_ARGS(cfs_rq)); DECLARE_TRACE(sched_util_est_se_tp, TP_PROTO(struct sched_entity *se), TP_ARGS(se)); DECLARE_TRACE(sched_update_nr_running_tp, TP_PROTO(struct rq *rq, int change), TP_ARGS(rq, change)); DECLARE_TRACE(sched_compute_energy_tp, TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy, unsigned long max_util, unsigned long busy_time), TP_ARGS(p, dst_cpu, energy, max_util, busy_time)); #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
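The tracepoints declared above are exported through tracefs, so a short userspace sketch may help show how they are consumed. This program is illustrative only and not part of the kernel sources; it assumes tracefs is mounted at /sys/kernel/tracing and that the caller is allowed to write the event's enable file and read trace_pipe.

/* Hypothetical userspace sketch: enable the sched:sched_switch tracepoint
 * defined in the header above and print a chunk of formatted events.
 * Paths assume the usual tracefs mount point at /sys/kernel/tracing. */
#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *enable = "/sys/kernel/tracing/events/sched/sched_switch/enable";
        const char *pipe = "/sys/kernel/tracing/trace_pipe";
        char buf[4096];
        ssize_t n;
        int fd;

        fd = open(enable, O_WRONLY);
        if (fd < 0) {
                perror("open enable");
                return 1;
        }
        if (write(fd, "1", 1) != 1)
                perror("write enable");
        close(fd);

        fd = open(pipe, O_RDONLY);
        if (fd < 0) {
                perror("open trace_pipe");
                return 1;
        }
        /* Each line follows the TP_printk() format above, e.g.
         * "prev_comm=... prev_pid=... ==> next_comm=..." */
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                fputs(buf, stdout);
        }
        close(fd);
        return 0;
}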
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/usb.h> #include <linux/unaligned.h> #include <net/bluetooth/bluetooth.h> #define VERSION "1.0" #define ATH3K_FIRMWARE "ath3k-1.fw" #define ATH3K_DNLOAD 0x01 #define ATH3K_GETSTATE 0x05 #define ATH3K_SET_NORMAL_MODE 0x07 #define ATH3K_GETVERSION 0x09 #define USB_REG_SWITCH_VID_PID 0x0a #define ATH3K_MODE_MASK 0x3F #define ATH3K_NORMAL_MODE 0x0E #define ATH3K_PATCH_UPDATE 0x80 #define ATH3K_SYSCFG_UPDATE 0x40 #define ATH3K_XTAL_FREQ_26M 0x00 #define ATH3K_XTAL_FREQ_40M 0x01 #define ATH3K_XTAL_FREQ_19P2 0x02 #define ATH3K_NAME_LEN 0xFF struct ath3k_version { __le32 rom_version; __le32 build_version; __le32 ram_version; __u8 ref_clock; __u8 reserved[7]; } __packed; static const struct usb_device_id ath3k_table[] = { /* Atheros AR3011 */ { USB_DEVICE(0x0CF3, 0x3000) }, /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0489, 0xE027) }, { USB_DEVICE(0x0489, 0xE03D) }, { USB_DEVICE(0x04F2, 0xAFF1) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0CF3, 0x3002) }, { USB_DEVICE(0x0CF3, 0xE019) }, { USB_DEVICE(0x13d3, 0x3304) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0489, 0xe04d) }, { USB_DEVICE(0x0489, 0xe04e) }, { USB_DEVICE(0x0489, 0xe057) }, { USB_DEVICE(0x0489, 0xe056) }, { USB_DEVICE(0x0489, 0xe05f) }, { USB_DEVICE(0x0489, 0xe076) }, { USB_DEVICE(0x0489, 0xe078) }, { USB_DEVICE(0x0489, 0xe095) }, { USB_DEVICE(0x04c5, 0x1330) }, { USB_DEVICE(0x04CA, 0x3004) }, { USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x04CA, 0x3006) }, { USB_DEVICE(0x04CA, 0x3007) }, { USB_DEVICE(0x04CA, 0x3008) }, { USB_DEVICE(0x04CA, 0x300b) }, { USB_DEVICE(0x04CA, 0x300d) }, { USB_DEVICE(0x04CA, 0x300f) }, { USB_DEVICE(0x04CA, 0x3010) }, { USB_DEVICE(0x04CA, 0x3014) }, { USB_DEVICE(0x04CA, 0x3018) }, { USB_DEVICE(0x0930, 0x0219) }, { USB_DEVICE(0x0930, 0x021c) }, { USB_DEVICE(0x0930, 0x0220) }, { USB_DEVICE(0x0930, 0x0227) }, { USB_DEVICE(0x0b05, 0x17d0) }, { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, { USB_DEVICE(0x0CF3, 0x3008) }, { USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x0CF3, 0x311E) }, { USB_DEVICE(0x0CF3, 0x311F) }, { USB_DEVICE(0x0cf3, 0x3121) }, { USB_DEVICE(0x0CF3, 0x817a) }, { USB_DEVICE(0x0CF3, 0x817b) }, { USB_DEVICE(0x0cf3, 0xe003) }, { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0CF3, 0xE005) }, { USB_DEVICE(0x0CF3, 0xE006) }, { USB_DEVICE(0x13d3, 0x3362) }, { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x13d3, 0x3393) }, { USB_DEVICE(0x13d3, 0x3395) }, { USB_DEVICE(0x13d3, 0x3402) }, { USB_DEVICE(0x13d3, 0x3408) }, { USB_DEVICE(0x13d3, 0x3423) }, { USB_DEVICE(0x13d3, 0x3432) }, { USB_DEVICE(0x13d3, 0x3472) }, { USB_DEVICE(0x13d3, 0x3474) }, { USB_DEVICE(0x13d3, 0x3487) }, { USB_DEVICE(0x13d3, 0x3490) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE036) }, { USB_DEVICE(0x0489, 0xE03C) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ath3k_table); #define BTUSB_ATH3012 0x80 /* This table is to load patch and sysconfig files * for AR3012 */ static const struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 
0xe056), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; static inline void ath3k_log_failed_loading(int err, int len, int size, int count) { BT_ERR("Firmware loading err = %d, len = %d, size = %d, count = %d", err, len, size, count); } #define USB_REQ_DFU_DNLOAD 1 #define BULK_SIZE 4096 #define FW_HDR_SIZE 20 #define TIMEGAP_USEC_MIN 50 #define TIMEGAP_USEC_MAX 100 static int ath3k_load_firmware(struct usb_device *udev, const struct firmware *firmware) 
{ u8 *send_buf; int len = 0; int err, pipe, size, sent = 0; int count = firmware->size; BT_DBG("udev %p", udev); send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } err = usb_control_msg_send(udev, 0, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, 0, 0, firmware->data, FW_HDR_SIZE, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (err) { BT_ERR("Can't change to loading configuration err"); goto error; } sent += FW_HDR_SIZE; count -= FW_HDR_SIZE; pipe = usb_sndbulkpipe(udev, 0x02); while (count) { /* workaround the compatibility issue with xHCI controller*/ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); size = min_t(uint, count, BULK_SIZE); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || len != size) { ath3k_log_failed_loading(err, len, size, count); goto error; } sent += size; count -= size; } error: kfree(send_buf); return err; } static int ath3k_get_state(struct usb_device *udev, unsigned char *state) { return usb_control_msg_recv(udev, 0, ATH3K_GETSTATE, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, state, 1, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_get_version(struct usb_device *udev, struct ath3k_version *version) { return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, sizeof(*version), USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_load_fwfile(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int len = 0; int err, pipe, size, count, sent = 0; int ret; count = firmware->size; send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } size = min_t(uint, count, FW_HDR_SIZE); ret = usb_control_msg_send(udev, 0, ATH3K_DNLOAD, USB_TYPE_VENDOR, 0, 0, firmware->data, size, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (ret) { BT_ERR("Can't change to loading configuration err"); kfree(send_buf); return ret; } sent += size; count -= size; pipe = usb_sndbulkpipe(udev, 0x02); while (count) { /* workaround the compatibility issue with xHCI controller*/ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); size = min_t(uint, count, BULK_SIZE); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || len != size) { ath3k_log_failed_loading(err, len, size, count); kfree(send_buf); return err; } sent += size; count -= size; } kfree(send_buf); return 0; } static void ath3k_switch_pid(struct usb_device *udev) { usb_control_msg_send(udev, 0, USB_REG_SWITCH_VID_PID, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_set_normal_mode(struct usb_device *udev) { unsigned char fw_state; int ret; ret = ath3k_get_state(udev, &fw_state); if (ret) { BT_ERR("Can't get state to change to normal mode err"); return ret; } if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) { BT_DBG("firmware was already in normal mode"); return 0; } return usb_control_msg_send(udev, 0, ATH3K_SET_NORMAL_MODE, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_load_patch(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN]; const struct firmware *firmware; struct ath3k_version fw_version; __u32 pt_rom_version, pt_build_version; int ret; ret = ath3k_get_state(udev, &fw_state); if (ret) { BT_ERR("Can't get state to change to load ram patch err"); return ret; } if (fw_state & ATH3K_PATCH_UPDATE) { BT_DBG("Patch was 
already downloaded"); return 0; } ret = ath3k_get_version(udev, &fw_version); if (ret) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu", le32_to_cpu(fw_version.rom_version)); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Patch file not found %s", filename); return ret; } pt_rom_version = get_unaligned_le32(firmware->data + firmware->size - 8); pt_build_version = get_unaligned_le32(firmware->data + firmware->size - 4); if (pt_rom_version != le32_to_cpu(fw_version.rom_version) || pt_build_version <= le32_to_cpu(fw_version.build_version)) { BT_ERR("Patch file version did not match with firmware"); release_firmware(firmware); return -EINVAL; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_load_syscfg(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN]; const struct firmware *firmware; struct ath3k_version fw_version; int clk_value, ret; ret = ath3k_get_state(udev, &fw_state); if (ret) { BT_ERR("Can't get state to change to load configuration err"); return -EBUSY; } ret = ath3k_get_version(udev, &fw_version); if (ret) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } switch (fw_version.ref_clock) { case ATH3K_XTAL_FREQ_26M: clk_value = 26; break; case ATH3K_XTAL_FREQ_40M: clk_value = 40; break; case ATH3K_XTAL_FREQ_19P2: clk_value = 19; break; default: clk_value = 0; break; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", le32_to_cpu(fw_version.rom_version), clk_value, ".dfu"); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Configuration file not found %s", filename); return ret; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); int ret; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; /* match device ID in ath3k blacklist table */ if (!id->driver_info) { const struct usb_device_id *match; match = usb_match_id(intf, ath3k_blist_tbl); if (match) id = match; } /* load patch and sysconfig files for AR3012 */ if (id->driver_info & BTUSB_ATH3012) { /* New firmware with patch and sysconfig files already loaded */ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001) return -ENODEV; ret = ath3k_load_patch(udev); if (ret < 0) { BT_ERR("Loading patch file failed"); return ret; } ret = ath3k_load_syscfg(udev); if (ret < 0) { BT_ERR("Loading sysconfig file failed"); return ret; } ret = ath3k_set_normal_mode(udev); if (ret) { BT_ERR("Set normal mode failed"); return ret; } ath3k_switch_pid(udev); return 0; } ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev); if (ret < 0) { if (ret == -ENOENT) BT_ERR("Firmware file \"%s\" not found", ATH3K_FIRMWARE); else BT_ERR("Firmware file \"%s\" request failed (err=%d)", ATH3K_FIRMWARE, ret); return ret; } ret = ath3k_load_firmware(udev, firmware); release_firmware(firmware); return ret; } static void ath3k_disconnect(struct usb_interface *intf) { BT_DBG("%s intf %p", __func__, intf); } static struct usb_driver ath3k_driver = { .name = "ath3k", .probe = ath3k_probe, .disconnect = ath3k_disconnect, .id_table = ath3k_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(ath3k_driver); MODULE_AUTHOR("Atheros 
Communications"); MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(ATH3K_FIRMWARE);
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * smc_sysctl.c: sysctl interface to SMC subsystem.
 *
 * Copyright (c) 2022, Alibaba Inc.
 *
 * Author: Tony Lu <tonylu@linux.alibaba.com>
 *
 */

#include <linux/init.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_llc.h"
#include "smc_sysctl.h"

static int min_sndbuf = SMC_BUF_MIN_SIZE;
static int min_rcvbuf = SMC_BUF_MIN_SIZE;
static int max_sndbuf = INT_MAX / 2;
static int max_rcvbuf = INT_MAX / 2;
static const int net_smc_wmem_init = (64 * 1024);
static const int net_smc_rmem_init = (64 * 1024);
static int links_per_lgr_min = SMC_LINKS_ADD_LNK_MIN;
static int links_per_lgr_max = SMC_LINKS_ADD_LNK_MAX;
static int conns_per_lgr_min = SMC_CONN_PER_LGR_MIN;
static int conns_per_lgr_max = SMC_CONN_PER_LGR_MAX;

static struct ctl_table smc_table[] = {
        {
                .procname = "autocorking_size",
                .data = &init_net.smc.sysctl_autocorking_size,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_douintvec,
        },
        {
                .procname = "smcr_buf_type",
                .data = &init_net.smc.sysctl_smcr_buf_type,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = proc_douintvec_minmax,
                .extra1 = SYSCTL_ZERO,
                .extra2 = SYSCTL_TWO,
        },
        {
                .procname = "smcr_testlink_time",
                .data = &init_net.smc.sysctl_smcr_testlink_time,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_jiffies,
        },
        {
                .procname = "wmem",
                .data = &init_net.smc.sysctl_wmem,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &min_sndbuf,
                .extra2 = &max_sndbuf,
        },
        {
                .procname = "rmem",
                .data = &init_net.smc.sysctl_rmem,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &min_rcvbuf,
                .extra2 = &max_rcvbuf,
        },
        {
                .procname = "smcr_max_links_per_lgr",
                .data = &init_net.smc.sysctl_max_links_per_lgr,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &links_per_lgr_min,
                .extra2 = &links_per_lgr_max,
        },
        {
                .procname = "smcr_max_conns_per_lgr",
                .data = &init_net.smc.sysctl_max_conns_per_lgr,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &conns_per_lgr_min,
                .extra2 = &conns_per_lgr_max,
        },
        {
                .procname = "limit_smc_hs",
                .data = &init_net.smc.limit_smc_hs,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = SYSCTL_ZERO,
                .extra2 = SYSCTL_ONE,
        },
};

int __net_init smc_sysctl_net_init(struct net *net)
{
        size_t table_size = ARRAY_SIZE(smc_table);
        struct ctl_table *table;

        table = smc_table;
        if (!net_eq(net, &init_net)) {
                int i;

                table = kmemdup(table, sizeof(smc_table), GFP_KERNEL);
                if (!table)
                        goto err_alloc;

                for (i = 0; i < table_size; i++)
                        table[i].data += (void *)net - (void *)&init_net;
        }

        net->smc.smc_hdr = register_net_sysctl_sz(net, "net/smc", table, table_size);
        if (!net->smc.smc_hdr)
                goto err_reg;

        net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE;
        net->smc.sysctl_smcr_buf_type =
SMCR_PHYS_CONT_BUFS; net->smc.sysctl_smcr_testlink_time = SMC_LLC_TESTLINK_DEFAULT_TIME; WRITE_ONCE(net->smc.sysctl_wmem, net_smc_wmem_init); WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init); net->smc.sysctl_max_links_per_lgr = SMC_LINKS_PER_LGR_MAX_PREFER; net->smc.sysctl_max_conns_per_lgr = SMC_CONN_PER_LGR_PREFER; /* disable handshake limitation by default */ net->smc.limit_smc_hs = 0; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } void __net_exit smc_sysctl_net_exit(struct net *net) { const struct ctl_table *table; table = net->smc.smc_hdr->ctl_table_arg; unregister_net_sysctl_table(net->smc.smc_hdr); if (!net_eq(net, &init_net)) kfree(table); }
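Because the table above is registered under "net/smc", each entry appears as /proc/sys/net/smc/<procname>. A minimal userspace sketch follows, assuming the SMC module is loaded so the entry exists; it simply reads the per-netns send-buffer default exposed by the "wmem" entry.

/* Userspace sketch (not part of the file above): read net.smc.wmem. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/smc/wmem", "r");
        int wmem;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%d", &wmem) == 1)
                printf("net.smc.wmem = %d bytes\n", wmem);
        fclose(f);
        return 0;
}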
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *      2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR and PRP.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
        [IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
        [IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
        [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
        [IFLA_HSR_VERSION] = { .type = NLA_U8 },
        [IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
        [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
        [IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
        [IFLA_HSR_INTERLINK] = { .type = NLA_U32 },
};

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
*/ static int hsr_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { enum hsr_version proto_version; unsigned char multicast_spec; u8 proto = HSR_PROTOCOL_HSR; struct net_device *link[2], *interlink = NULL; if (!data) { NL_SET_ERR_MSG_MOD(extack, "No slave devices specified"); return -EINVAL; } if (!data[IFLA_HSR_SLAVE1]) { NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified"); return -EINVAL; } link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1])); if (!link[0]) { NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist"); return -EINVAL; } if (!data[IFLA_HSR_SLAVE2]) { NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified"); return -EINVAL; } link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2])); if (!link[1]) { NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist"); return -EINVAL; } if (link[0] == link[1]) { NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same"); return -EINVAL; } if (data[IFLA_HSR_INTERLINK]) interlink = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_INTERLINK])); if (interlink && interlink == link[0]) { NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same"); return -EINVAL; } if (interlink && interlink == link[1]) { NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same"); return -EINVAL; } multicast_spec = nla_get_u8_default(data[IFLA_HSR_MULTICAST_SPEC], 0); if (data[IFLA_HSR_PROTOCOL]) proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]); if (proto >= HSR_PROTOCOL_MAX) { NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol"); return -EINVAL; } if (!data[IFLA_HSR_VERSION]) { proto_version = HSR_V0; } else { if (proto == HSR_PROTOCOL_PRP) { NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported"); return -EINVAL; } proto_version = nla_get_u8(data[IFLA_HSR_VERSION]); if (proto_version > HSR_V1) { NL_SET_ERR_MSG_MOD(extack, "Only HSR version 0/1 supported"); return -EINVAL; } } if (proto == HSR_PROTOCOL_PRP) { proto_version = PRP_V1; if (interlink) { NL_SET_ERR_MSG_MOD(extack, "Interlink only works with HSR"); return -EINVAL; } } return hsr_dev_finalize(dev, link, interlink, multicast_spec, proto_version, extack); } static void hsr_dellink(struct net_device *dev, struct list_head *head) { struct hsr_priv *hsr = netdev_priv(dev); timer_delete_sync(&hsr->prune_timer); timer_delete_sync(&hsr->prune_proxy_timer); timer_delete_sync(&hsr->announce_timer); timer_delete_sync(&hsr->announce_proxy_timer); hsr_debugfs_term(hsr); hsr_del_ports(hsr); hsr_del_self_node(hsr); hsr_del_nodes(&hsr->node_db); hsr_del_nodes(&hsr->proxy_node_db); unregister_netdevice_queue(dev, head); } static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct hsr_priv *hsr = netdev_priv(dev); u8 proto = HSR_PROTOCOL_HSR; struct hsr_port *port; port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); if (port) { if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex)) goto nla_put_failure; } port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); if (port) { if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex)) goto nla_put_failure; } if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN, hsr->sup_multicast_addr) || nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr)) goto nla_put_failure; if (hsr->prot_version == PRP_V1) proto = HSR_PROTOCOL_PRP; if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops hsr_link_ops __read_mostly = { .kind = "hsr", .maxtype = IFLA_HSR_MAX, .policy = 
hsr_policy, .priv_size = sizeof(struct hsr_priv), .setup = hsr_dev_setup, .newlink = hsr_newlink, .dellink = hsr_dellink, .fill_info = hsr_fill_info, }; /* attribute policy */ static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = { [HSR_A_NODE_ADDR] = { .len = ETH_ALEN }, [HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN }, [HSR_A_IFINDEX] = { .type = NLA_U32 }, [HSR_A_IF1_AGE] = { .type = NLA_U32 }, [HSR_A_IF2_AGE] = { .type = NLA_U32 }, [HSR_A_IF1_SEQ] = { .type = NLA_U16 }, [HSR_A_IF2_SEQ] = { .type = NLA_U16 }, }; static struct genl_family hsr_genl_family; static const struct genl_multicast_group hsr_mcgrps[] = { { .name = "hsr-network", }, }; /* This is called if for some node with MAC address addr, we only get frames * over one of the slave interfaces. This would indicate an open network ring * (i.e. a link has failed somewhere). */ void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN], struct hsr_port *port) { struct sk_buff *skb; void *msg_head; struct hsr_port *master; int res; skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (!skb) goto fail; msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_RING_ERROR); if (!msg_head) goto nla_put_failure; res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr); if (res < 0) goto nla_put_failure; res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex); if (res < 0) goto nla_put_failure; genlmsg_end(skb, msg_head); genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC); return; nla_put_failure: kfree_skb(skb); fail: rcu_read_lock(); master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); netdev_warn(master->dev, "Could not send HSR ring error message\n"); rcu_read_unlock(); } /* This is called when we haven't heard from the node with MAC address addr for * some time (just before the node is removed from the node table/list). */ void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN]) { struct sk_buff *skb; void *msg_head; struct hsr_port *master; int res; skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (!skb) goto fail; msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN); if (!msg_head) goto nla_put_failure; res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr); if (res < 0) goto nla_put_failure; genlmsg_end(skb, msg_head); genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC); return; nla_put_failure: kfree_skb(skb); fail: rcu_read_lock(); master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); netdev_warn(master->dev, "Could not send HSR node down\n"); rcu_read_unlock(); } /* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table * about the status of a specific node in the network, defined by its MAC * address. 
* * Input: hsr ifindex, node mac address * Output: hsr ifindex, node mac address (copied from request), * age of latest frame from node over slave 1, slave 2 [ms] */ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info) { /* For receiving */ struct nlattr *na; struct net_device *hsr_dev; /* For sending */ struct sk_buff *skb_out; void *msg_head; struct hsr_priv *hsr; struct hsr_port *port; unsigned char hsr_node_addr_b[ETH_ALEN]; int hsr_node_if1_age; u16 hsr_node_if1_seq; int hsr_node_if2_age; u16 hsr_node_if2_seq; int addr_b_ifindex; int res; if (!info) goto invalid; na = info->attrs[HSR_A_IFINDEX]; if (!na) goto invalid; na = info->attrs[HSR_A_NODE_ADDR]; if (!na) goto invalid; rcu_read_lock(); hsr_dev = dev_get_by_index_rcu(genl_info_net(info), nla_get_u32(info->attrs[HSR_A_IFINDEX])); if (!hsr_dev) goto rcu_unlock; if (!is_hsr_master(hsr_dev)) goto rcu_unlock; /* Send reply */ skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (!skb_out) { res = -ENOMEM; goto fail; } msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid, info->snd_seq, &hsr_genl_family, 0, HSR_C_SET_NODE_STATUS); if (!msg_head) { res = -ENOMEM; goto nla_put_failure; } res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex); if (res < 0) goto nla_put_failure; hsr = netdev_priv(hsr_dev); res = hsr_get_node_data(hsr, (unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]), hsr_node_addr_b, &addr_b_ifindex, &hsr_node_if1_age, &hsr_node_if1_seq, &hsr_node_if2_age, &hsr_node_if2_seq); if (res < 0) goto nla_put_failure; res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, nla_data(info->attrs[HSR_A_NODE_ADDR])); if (res < 0) goto nla_put_failure; if (addr_b_ifindex > -1) { res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN, hsr_node_addr_b); if (res < 0) goto nla_put_failure; res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX, addr_b_ifindex); if (res < 0) goto nla_put_failure; } res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age); if (res < 0) goto nla_put_failure; res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq); if (res < 0) goto nla_put_failure; port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); if (port) res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX, port->dev->ifindex); if (res < 0) goto nla_put_failure; res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age); if (res < 0) goto nla_put_failure; res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq); if (res < 0) goto nla_put_failure; port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); if (port) res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX, port->dev->ifindex); if (res < 0) goto nla_put_failure; rcu_read_unlock(); genlmsg_end(skb_out, msg_head); genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid); return 0; rcu_unlock: rcu_read_unlock(); invalid: netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); return 0; nla_put_failure: kfree_skb(skb_out); /* Fall through */ fail: rcu_read_unlock(); return res; } /* Get a list of MacAddressA of all nodes known to this node (including self). 
*/ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info) { unsigned char addr[ETH_ALEN]; struct net_device *hsr_dev; struct sk_buff *skb_out; struct hsr_priv *hsr; bool restart = false; struct nlattr *na; void *pos = NULL; void *msg_head; int res; if (!info) goto invalid; na = info->attrs[HSR_A_IFINDEX]; if (!na) goto invalid; rcu_read_lock(); hsr_dev = dev_get_by_index_rcu(genl_info_net(info), nla_get_u32(info->attrs[HSR_A_IFINDEX])); if (!hsr_dev) goto rcu_unlock; if (!is_hsr_master(hsr_dev)) goto rcu_unlock; restart: /* Send reply */ skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb_out) { res = -ENOMEM; goto fail; } msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid, info->snd_seq, &hsr_genl_family, 0, HSR_C_SET_NODE_LIST); if (!msg_head) { res = -ENOMEM; goto nla_put_failure; } if (!restart) { res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex); if (res < 0) goto nla_put_failure; } hsr = netdev_priv(hsr_dev); if (!pos) pos = hsr_get_next_node(hsr, NULL, addr); while (pos) { res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr); if (res < 0) { if (res == -EMSGSIZE) { genlmsg_end(skb_out, msg_head); genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid); restart = true; goto restart; } goto nla_put_failure; } pos = hsr_get_next_node(hsr, pos, addr); } rcu_read_unlock(); genlmsg_end(skb_out, msg_head); genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid); return 0; rcu_unlock: rcu_read_unlock(); invalid: netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); return 0; nla_put_failure: nlmsg_free(skb_out); /* Fall through */ fail: rcu_read_unlock(); return res; } static const struct genl_small_ops hsr_ops[] = { { .cmd = HSR_C_GET_NODE_STATUS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = 0, .doit = hsr_get_node_status, .dumpit = NULL, }, { .cmd = HSR_C_GET_NODE_LIST, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = 0, .doit = hsr_get_node_list, .dumpit = NULL, }, }; static struct genl_family hsr_genl_family __ro_after_init = { .hdrsize = 0, .name = "HSR", .version = 1, .maxattr = HSR_A_MAX, .policy = hsr_genl_policy, .netnsok = true, .module = THIS_MODULE, .small_ops = hsr_ops, .n_small_ops = ARRAY_SIZE(hsr_ops), .resv_start_op = HSR_C_SET_NODE_LIST + 1, .mcgrps = hsr_mcgrps, .n_mcgrps = ARRAY_SIZE(hsr_mcgrps), }; int __init hsr_netlink_init(void) { int rc; rc = rtnl_link_register(&hsr_link_ops); if (rc) goto fail_rtnl_link_register; rc = genl_register_family(&hsr_genl_family); if (rc) goto fail_genl_register_family; hsr_debugfs_create_root(); return 0; fail_genl_register_family: rtnl_link_unregister(&hsr_link_ops); fail_rtnl_link_register: return rc; } void __exit hsr_netlink_exit(void) { genl_unregister_family(&hsr_genl_family); rtnl_link_unregister(&hsr_link_ops); } MODULE_ALIAS_RTNL_LINK("hsr");
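For context on the rtnetlink side: IFLA_HSR_SLAVE1, IFLA_HSR_SLAVE2 and the optional IFLA_HSR_INTERLINK attributes consumed by hsr_newlink() above are NLA_U32 interface indexes, not names, so a configuration tool resolves the device names before building the RTM_NEWLINK request (as iproute2's "ip link add ... type hsr" does). The sketch below covers only that name-to-index step, with hypothetical interface names; constructing and sending the netlink message itself is omitted.

/* Userspace sketch: resolve slave names to the ifindex values that would
 * be placed in IFLA_HSR_SLAVE1/IFLA_HSR_SLAVE2. */
#include <net/if.h>
#include <stdio.h>

int main(void)
{
        /* example interface names; adjust to the actual slave ports */
        unsigned int slave1 = if_nametoindex("eth0");
        unsigned int slave2 = if_nametoindex("eth1");

        if (!slave1 || !slave2) {
                perror("if_nametoindex");
                return 1;
        }
        printf("IFLA_HSR_SLAVE1=%u IFLA_HSR_SLAVE2=%u\n", slave1, slave2);
        return 0;
}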
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
        struct flow_rule *rule;
        int i;

        rule = kzalloc(struct_size(rule, action.entries, num_actions),
                       GFP_KERNEL);
        if (!rule)
                return NULL;

        rule->action.num_entries = num_actions;
        /* Pre-fill each action hw_stats with DONT_CARE.
         * Caller can override this if it wants stats for a given action.
*/ for (i = 0; i < num_actions; i++) rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; return rule; } EXPORT_SYMBOL(flow_rule_alloc); struct flow_offload_action *offload_action_alloc(unsigned int num_actions) { struct flow_offload_action *fl_action; int i; fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions), GFP_KERNEL); if (!fl_action) return NULL; fl_action->action.num_entries = num_actions; /* Pre-fill each action hw_stats with DONT_CARE. * Caller can override this if it wants stats for a given action. */ for (i = 0; i < num_actions; i++) fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; return fl_action; } #define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ const struct flow_match *__m = &(__rule)->match; \ struct flow_dissector *__d = (__m)->dissector; \ \ (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ void flow_rule_match_meta(const struct flow_rule *rule, struct flow_match_meta *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out); } EXPORT_SYMBOL(flow_rule_match_meta); void flow_rule_match_basic(const struct flow_rule *rule, struct flow_match_basic *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); } EXPORT_SYMBOL(flow_rule_match_basic); void flow_rule_match_control(const struct flow_rule *rule, struct flow_match_control *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); } EXPORT_SYMBOL(flow_rule_match_control); void flow_rule_match_eth_addrs(const struct flow_rule *rule, struct flow_match_eth_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_eth_addrs); void flow_rule_match_vlan(const struct flow_rule *rule, struct flow_match_vlan *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); } EXPORT_SYMBOL(flow_rule_match_vlan); void flow_rule_match_cvlan(const struct flow_rule *rule, struct flow_match_vlan *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out); } EXPORT_SYMBOL(flow_rule_match_cvlan); void flow_rule_match_arp(const struct flow_rule *rule, struct flow_match_arp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out); } EXPORT_SYMBOL(flow_rule_match_arp); void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, struct flow_match_ipv4_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_ipv4_addrs); void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, struct flow_match_ipv6_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_ipv6_addrs); void flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out); } EXPORT_SYMBOL(flow_rule_match_ip); void flow_rule_match_ports(const struct flow_rule *rule, struct flow_match_ports *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); } EXPORT_SYMBOL(flow_rule_match_ports); void flow_rule_match_ports_range(const struct flow_rule *rule, struct flow_match_ports_range *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out); } EXPORT_SYMBOL(flow_rule_match_ports_range); void flow_rule_match_tcp(const struct flow_rule *rule, struct flow_match_tcp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out); } EXPORT_SYMBOL(flow_rule_match_tcp); void flow_rule_match_ipsec(const struct flow_rule *rule, struct flow_match_ipsec *out) 
{ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out); } EXPORT_SYMBOL(flow_rule_match_ipsec); void flow_rule_match_icmp(const struct flow_rule *rule, struct flow_match_icmp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out); } EXPORT_SYMBOL(flow_rule_match_icmp); void flow_rule_match_mpls(const struct flow_rule *rule, struct flow_match_mpls *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out); } EXPORT_SYMBOL(flow_rule_match_mpls); void flow_rule_match_enc_control(const struct flow_rule *rule, struct flow_match_control *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); } EXPORT_SYMBOL(flow_rule_match_enc_control); void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, struct flow_match_ipv4_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs); void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, struct flow_match_ipv6_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs); void flow_rule_match_enc_ip(const struct flow_rule *rule, struct flow_match_ip *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); } EXPORT_SYMBOL(flow_rule_match_enc_ip); void flow_rule_match_enc_ports(const struct flow_rule *rule, struct flow_match_ports *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ports); void flow_rule_match_enc_keyid(const struct flow_rule *rule, struct flow_match_enc_keyid *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); } EXPORT_SYMBOL(flow_rule_match_enc_keyid); void flow_rule_match_enc_opts(const struct flow_rule *rule, struct flow_match_enc_opts *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out); } EXPORT_SYMBOL(flow_rule_match_enc_opts); struct flow_action_cookie *flow_action_cookie_create(void *data, unsigned int len, gfp_t gfp) { struct flow_action_cookie *cookie; cookie = kmalloc(sizeof(*cookie) + len, gfp); if (!cookie) return NULL; cookie->cookie_len = len; memcpy(cookie->cookie, data, len); return cookie; } EXPORT_SYMBOL(flow_action_cookie_create); void flow_action_cookie_destroy(struct flow_action_cookie *cookie) { kfree(cookie); } EXPORT_SYMBOL(flow_action_cookie_destroy); void flow_rule_match_ct(const struct flow_rule *rule, struct flow_match_ct *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out); } EXPORT_SYMBOL(flow_rule_match_ct); void flow_rule_match_pppoe(const struct flow_rule *rule, struct flow_match_pppoe *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out); } EXPORT_SYMBOL(flow_rule_match_pppoe); void flow_rule_match_l2tpv3(const struct flow_rule *rule, struct flow_match_l2tpv3 *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out); } EXPORT_SYMBOL(flow_rule_match_l2tpv3); struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv)) { struct flow_block_cb *block_cb; block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL); if (!block_cb) return ERR_PTR(-ENOMEM); block_cb->cb = cb; block_cb->cb_ident = cb_ident; block_cb->cb_priv = cb_priv; block_cb->release = release; return block_cb; } EXPORT_SYMBOL(flow_block_cb_alloc); void flow_block_cb_free(struct flow_block_cb *block_cb) { if (block_cb->release) block_cb->release(block_cb->cb_priv); kfree(block_cb); } EXPORT_SYMBOL(flow_block_cb_free); struct flow_block_cb *flow_block_cb_lookup(struct 
flow_block *block, flow_setup_cb_t *cb, void *cb_ident) { struct flow_block_cb *block_cb; list_for_each_entry(block_cb, &block->cb_list, list) { if (block_cb->cb == cb && block_cb->cb_ident == cb_ident) return block_cb; } return NULL; } EXPORT_SYMBOL(flow_block_cb_lookup); void *flow_block_cb_priv(struct flow_block_cb *block_cb) { return block_cb->cb_priv; } EXPORT_SYMBOL(flow_block_cb_priv); void flow_block_cb_incref(struct flow_block_cb *block_cb) { block_cb->refcnt++; } EXPORT_SYMBOL(flow_block_cb_incref); unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb) { return --block_cb->refcnt; } EXPORT_SYMBOL(flow_block_cb_decref); bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, struct list_head *driver_block_list) { struct flow_block_cb *block_cb; list_for_each_entry(block_cb, driver_block_list, driver_list) { if (block_cb->cb == cb && block_cb->cb_ident == cb_ident) return true; } return false; } EXPORT_SYMBOL(flow_block_cb_is_busy); int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_block_list, flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, bool ingress_only) { struct flow_block_cb *block_cb; if (ingress_only && f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; f->driver_block_list = driver_block_list; switch (f->command) { case FLOW_BLOCK_BIND: if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list)) return -EBUSY; block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); flow_block_cb_add(block_cb, f); list_add_tail(&block_cb->driver_list, driver_block_list); return 0; case FLOW_BLOCK_UNBIND: block_cb = flow_block_cb_lookup(f->block, cb, cb_ident); if (!block_cb) return -ENOENT; flow_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); return 0; default: return -EOPNOTSUPP; } } EXPORT_SYMBOL(flow_block_cb_setup_simple); static DEFINE_MUTEX(flow_indr_block_lock); static LIST_HEAD(flow_block_indr_list); static LIST_HEAD(flow_block_indr_dev_list); static LIST_HEAD(flow_indir_dev_list); struct flow_indr_dev { struct list_head list; flow_indr_block_bind_cb_t *cb; void *cb_priv; refcount_t refcnt; }; static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_indr_dev *indr_dev; indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL); if (!indr_dev) return NULL; indr_dev->cb = cb; indr_dev->cb_priv = cb_priv; refcount_set(&indr_dev->refcnt, 1); return indr_dev; } struct flow_indir_dev_info { void *data; struct net_device *dev; struct Qdisc *sch; enum tc_setup_type type; void (*cleanup)(struct flow_block_cb *block_cb); struct list_head list; enum flow_block_command command; enum flow_block_binder_type binder_type; struct list_head *cb_list; }; static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_block_offload bo; struct flow_indir_dev_info *cur; list_for_each_entry(cur, &flow_indir_dev_list, list) { memset(&bo, 0, sizeof(bo)); bo.command = cur->command; bo.binder_type = cur->binder_type; INIT_LIST_HEAD(&bo.cb_list); cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup); list_splice(&bo.cb_list, cur->cb_list); } } int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_indr_dev *indr_dev; mutex_lock(&flow_indr_block_lock); list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) { if (indr_dev->cb == cb && indr_dev->cb_priv == cb_priv) { refcount_inc(&indr_dev->refcnt); 
mutex_unlock(&flow_indr_block_lock); return 0; } } indr_dev = flow_indr_dev_alloc(cb, cb_priv); if (!indr_dev) { mutex_unlock(&flow_indr_block_lock); return -ENOMEM; } list_add(&indr_dev->list, &flow_block_indr_dev_list); existing_qdiscs_register(cb, cb_priv); mutex_unlock(&flow_indr_block_lock); tcf_action_reoffload_cb(cb, cb_priv, true); return 0; } EXPORT_SYMBOL(flow_indr_dev_register); static void __flow_block_indr_cleanup(void (*release)(void *cb_priv), void *cb_priv, struct list_head *cleanup_list) { struct flow_block_cb *this, *next; list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) { if (this->release == release && this->indr.cb_priv == cb_priv) list_move(&this->indr.list, cleanup_list); } } static void flow_block_indr_notify(struct list_head *cleanup_list) { struct flow_block_cb *this, *next; list_for_each_entry_safe(this, next, cleanup_list, indr.list) { list_del(&this->indr.list); this->indr.cleanup(this); } } void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, void (*release)(void *cb_priv)) { struct flow_indr_dev *this, *next, *indr_dev = NULL; LIST_HEAD(cleanup_list); mutex_lock(&flow_indr_block_lock); list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) { if (this->cb == cb && this->cb_priv == cb_priv && refcount_dec_and_test(&this->refcnt)) { indr_dev = this; list_del(&indr_dev->list); break; } } if (!indr_dev) { mutex_unlock(&flow_indr_block_lock); return; } __flow_block_indr_cleanup(release, cb_priv, &cleanup_list); mutex_unlock(&flow_indr_block_lock); tcf_action_reoffload_cb(cb, cb_priv, false); flow_block_indr_notify(&cleanup_list); kfree(indr_dev); } EXPORT_SYMBOL(flow_indr_dev_unregister); static void flow_block_indr_init(struct flow_block_cb *flow_block, struct flow_block_offload *bo, struct net_device *dev, struct Qdisc *sch, void *data, void *cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { flow_block->indr.binder_type = bo->binder_type; flow_block->indr.data = data; flow_block->indr.cb_priv = cb_priv; flow_block->indr.dev = dev; flow_block->indr.sch = sch; flow_block->indr.cleanup = cleanup; } struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv), struct flow_block_offload *bo, struct net_device *dev, struct Qdisc *sch, void *data, void *indr_cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_block_cb *block_cb; block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release); if (IS_ERR(block_cb)) goto out; flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup); list_add(&block_cb->indr.list, &flow_block_indr_list); out: return block_cb; } EXPORT_SYMBOL(flow_indr_block_cb_alloc); static struct flow_indir_dev_info *find_indir_dev(void *data) { struct flow_indir_dev_info *cur; list_for_each_entry(cur, &flow_indir_dev_list, list) { if (cur->data == data) return cur; } return NULL; } static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb), struct flow_block_offload *bo) { struct flow_indir_dev_info *info; info = find_indir_dev(data); if (info) return -EEXIST; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->data = data; info->dev = dev; info->sch = sch; info->type = type; info->cleanup = cleanup; info->command = bo->command; info->binder_type = bo->binder_type; info->cb_list = bo->cb_list_head; list_add(&info->list, &flow_indir_dev_list); return 0; } static int 
indir_dev_remove(void *data) { struct flow_indir_dev_info *info; info = find_indir_dev(data); if (!info) return -ENOENT; list_del(&info->list); kfree(info); return 0; } int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_indr_dev *this; u32 count = 0; int err; mutex_lock(&flow_indr_block_lock); if (bo) { if (bo->command == FLOW_BLOCK_BIND) indir_dev_add(data, dev, sch, type, cleanup, bo); else if (bo->command == FLOW_BLOCK_UNBIND) indir_dev_remove(data); } list_for_each_entry(this, &flow_block_indr_dev_list, list) { err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); if (!err) count++; } mutex_unlock(&flow_indr_block_lock); return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); bool flow_indr_dev_exists(void) { return !list_empty(&flow_block_indr_dev_list); } EXPORT_SYMBOL(flow_indr_dev_exists);
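/*
 * Illustrative sketch, not part of flow_offload.c: one way a driver's
 * flow_setup_cb_t handler might consume a struct flow_rule through the
 * flow_rule_match_*() helpers exported above.  flow_rule_match_key() is
 * assumed to be the inline helper from <net/flow_offload.h> and
 * IPPROTO_TCP to come from <linux/in.h>; the function name and the
 * "TCP only" policy are hypothetical.
 */
static int example_parse_flow_rule(const struct flow_rule *rule)
{
	struct flow_match_basic basic;
	struct flow_match_ipv4_addrs addrs;

	flow_rule_match_basic(rule, &basic);
	if (basic.mask->ip_proto && basic.key->ip_proto != IPPROTO_TCP)
		return -EOPNOTSUPP;	/* hypothetical hardware limitation */

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		flow_rule_match_ipv4_addrs(rule, &addrs);
		/* addrs.key->src / addrs.key->dst hold the IPv4 match,
		 * addrs.mask->* the corresponding masks.
		 */
	}
	return 0;
}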
// SPDX-License-Identifier: GPL-2.0 /* * * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. * */ #include <linux/fs.h> #include "debug.h" #include "ntfs.h" #include "ntfs_fs.h" /* * al_is_valid_le * * Return: True if @le is valid. */ static inline bool al_is_valid_le(const struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le) { if (!le || !ni->attr_list.le || !ni->attr_list.size) return false; return PtrOffset(ni->attr_list.le, le) + le16_to_cpu(le->size) <= ni->attr_list.size; } void al_destroy(struct ntfs_inode *ni) { run_close(&ni->attr_list.run); kvfree(ni->attr_list.le); ni->attr_list.le = NULL; ni->attr_list.size = 0; ni->attr_list.dirty = false; } /* * ntfs_load_attr_list * * This method makes sure that the ATTRIB list, if present, * has been properly set up. */ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr) { int err; size_t lsize; void *le = NULL; if (ni->attr_list.size) return 0; if (!attr->non_res) { lsize = le32_to_cpu(attr->res.data_size); /* attr is resident: lsize < record_size (1K or 4K) */ le = kvmalloc(al_aligned(lsize), GFP_KERNEL); if (!le) { err = -ENOMEM; goto out; } memcpy(le, resident_data(attr), lsize); } else if (attr->nres.svcn) { err = -EINVAL; goto out; } else { u16 run_off = le16_to_cpu(attr->nres.run_off); lsize = le64_to_cpu(attr->nres.data_size); run_init(&ni->attr_list.run); if (run_off > le32_to_cpu(attr->size)) { err = -EINVAL; goto out; } err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno, 0, le64_to_cpu(attr->nres.evcn), 0, Add2Ptr(attr, run_off), le32_to_cpu(attr->size) - run_off); if (err < 0) goto out; /* attr is nonresident. * The worst case: * 1T (2^40) extremely fragmented file.
* cluster = 4K (2^12) => 2^28 fragments * 2^9 fragments per one record => 2^19 records * 2^5 bytes of ATTR_LIST_ENTRY per one record => 2^24 bytes. * * the result is 16M bytes per attribute list. * Use kvmalloc to allocate in range [several Kbytes - dozen Mbytes] */ le = kvmalloc(al_aligned(lsize), GFP_KERNEL); if (!le) { err = -ENOMEM; goto out; } err = ntfs_read_run_nb(ni->mi.sbi, &ni->attr_list.run, 0, le, lsize, NULL); if (err) goto out; } ni->attr_list.size = lsize; ni->attr_list.le = le; return 0; out: ni->attr_list.le = le; al_destroy(ni); return err; } /* * al_enumerate * * Return: * * The next list le. * * If @le is NULL then return the first le. */ struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le) { size_t off; u16 sz; const unsigned le_min_size = le_size(0); if (!le) { le = ni->attr_list.le; } else { sz = le16_to_cpu(le->size); if (sz < le_min_size) { /* Impossible 'cause we should not return such le. */ return NULL; } le = Add2Ptr(le, sz); } /* Check boundary. */ off = PtrOffset(ni->attr_list.le, le); if (off + le_min_size > ni->attr_list.size) { /* The regular end of list. */ return NULL; } sz = le16_to_cpu(le->size); /* Check le for errors. */ if (sz < le_min_size || off + sz > ni->attr_list.size || sz < le->name_off + le->name_len * sizeof(short)) { return NULL; } return le; } /* * al_find_le * * Find the first le in the list which matches type, name and VCN. * * Return: NULL if not found. */ struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le, const struct ATTRIB *attr) { CLST svcn = attr_svcn(attr); return al_find_ex(ni, le, attr->type, attr_name(attr), attr->name_len, &svcn); } /* * al_find_ex * * Find the first le in the list which matches type, name and VCN. * * Return: NULL if not found. */ struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le, enum ATTR_TYPE type, const __le16 *name, u8 name_len, const CLST *vcn) { struct ATTR_LIST_ENTRY *ret = NULL; u32 type_in = le32_to_cpu(type); while ((le = al_enumerate(ni, le))) { u64 le_vcn; int diff = le32_to_cpu(le->type) - type_in; /* List entries are sorted by type, name and VCN. */ if (diff < 0) continue; if (diff > 0) return ret; if (le->name_len != name_len) continue; le_vcn = le64_to_cpu(le->vcn); if (!le_vcn) { /* * Compare entry names only for entry with vcn == 0. */ diff = ntfs_cmp_names(le_name(le), name_len, name, name_len, ni->mi.sbi->upcase, true); if (diff < 0) continue; if (diff > 0) return ret; } if (!vcn) return le; if (*vcn == le_vcn) return le; if (*vcn < le_vcn) return ret; ret = le; } return ret; } /* * al_find_le_to_insert * * Find the first list entry which matches type, name and VCN. */ static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name, u8 name_len, CLST vcn) { struct ATTR_LIST_ENTRY *le = NULL, *prev; u32 type_in = le32_to_cpu(type); /* List entries are sorted by type, name and VCN. */ while ((le = al_enumerate(ni, prev = le))) { int diff = le32_to_cpu(le->type) - type_in; if (diff < 0) continue; if (diff > 0) return le; if (!le->vcn) { /* * Compare entry names only for entry with vcn == 0. */ diff = ntfs_cmp_names(le_name(le), le->name_len, name, name_len, ni->mi.sbi->upcase, true); if (diff < 0) continue; if (diff > 0) return le; } if (le64_to_cpu(le->vcn) >= vcn) return le; } return prev ? Add2Ptr(prev, le16_to_cpu(prev->size)) : ni->attr_list.le; } /* * al_add_le * * Add an "attribute list entry" to the list. 
*/ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name, u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref, struct ATTR_LIST_ENTRY **new_le) { int err; struct ATTRIB *attr; struct ATTR_LIST_ENTRY *le; size_t off; u16 sz; size_t asize, new_asize, old_size; u64 new_size; typeof(ni->attr_list) *al = &ni->attr_list; /* * Compute the size of the new 'le' */ sz = le_size(name_len); old_size = al->size; new_size = old_size + sz; asize = al_aligned(old_size); new_asize = al_aligned(new_size); /* Scan forward to the point at which the new 'le' should be inserted. */ le = al_find_le_to_insert(ni, type, name, name_len, svcn); off = PtrOffset(al->le, le); if (new_size > asize) { void *ptr = kmalloc(new_asize, GFP_NOFS); if (!ptr) return -ENOMEM; memcpy(ptr, al->le, off); memcpy(Add2Ptr(ptr, off + sz), le, old_size - off); le = Add2Ptr(ptr, off); kvfree(al->le); al->le = ptr; } else { memmove(Add2Ptr(le, sz), le, old_size - off); } *new_le = le; al->size = new_size; le->type = type; le->size = cpu_to_le16(sz); le->name_len = name_len; le->name_off = offsetof(struct ATTR_LIST_ENTRY, name); le->vcn = cpu_to_le64(svcn); le->ref = *ref; le->id = id; memcpy(le->name, name, sizeof(short) * name_len); err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, new_size, &new_size, true, &attr); if (err) { /* Undo memmove above. */ memmove(le, Add2Ptr(le, sz), old_size - off); al->size = old_size; return err; } al->dirty = true; if (attr && attr->non_res) { err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le, al->size, 0); if (err) return err; al->dirty = false; } return 0; } /* * al_remove_le - Remove @le from attribute list. */ bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le) { u16 size; size_t off; typeof(ni->attr_list) *al = &ni->attr_list; if (!al_is_valid_le(ni, le)) return false; /* Save on stack the size of 'le' */ size = le16_to_cpu(le->size); off = PtrOffset(al->le, le); memmove(le, Add2Ptr(le, size), al->size - (off + size)); al->size -= size; al->dirty = true; return true; } int al_update(struct ntfs_inode *ni, int sync) { int err; struct ATTRIB *attr; typeof(ni->attr_list) *al = &ni->attr_list; if (!al->dirty || !al->size) return 0; /* * Attribute list increased on demand in al_add_le. * Attribute list decreased here. */ err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, al->size, NULL, false, &attr); if (err) goto out; if (!attr->non_res) { memcpy(resident_data(attr), al->le, al->size); } else { err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le, al->size, sync); if (err) goto out; attr->nres.valid_size = attr->nres.data_size; } ni->mi.dirty = true; al->dirty = false; out: return err; }
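/*
 * Illustrative sketch, not part of attrlist.c: the worst-case attribute
 * list size that the comment in ntfs_load_attr_list() derives, written
 * out as arithmetic.  The constants are the comment's own assumptions
 * (1T file, 4K clusters, 2^9 fragments per record, 2^5 bytes per
 * ATTR_LIST_ENTRY); the helper name is hypothetical.
 */
static inline unsigned long long attr_list_worst_case_bytes(void)
{
	unsigned long long fragments = (1ULL << 40) >> 12;	/* 2^28 */
	unsigned long long records = fragments >> 9;		/* 2^19 */

	return records << 5;	/* 2^24 bytes, i.e. 16M per attribute list */
}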
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 IBM Corporation * Copyright (C) 2010 Politecnico di Torino, Italy * TORSEC group -- https://security.polito.it * * Authors: * Mimi Zohar <zohar@us.ibm.com> * Roberto Sassu <roberto.sassu@polito.it> * * See Documentation/security/keys/trusted-encrypted.rst */ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/parser.h> #include <linux/string.h> #include <linux/err.h> #include <keys/user-type.h> #include <keys/trusted-type.h> #include <keys/encrypted-type.h> #include <linux/key-type.h> #include <linux/random.h> #include <linux/rcupdate.h> #include <linux/scatterlist.h> #include <linux/ctype.h> #include <crypto/aes.h> #include <crypto/hash.h> #include <crypto/sha2.h> #include <crypto/skcipher.h> #include <crypto/utils.h> #include "encrypted.h" #include "ecryptfs_format.h" static const char KEY_TRUSTED_PREFIX[] = "trusted:"; static const char KEY_USER_PREFIX[] = "user:"; static const char hash_alg[] = "sha256"; static const char hmac_alg[] = "hmac(sha256)"; static const char blkcipher_alg[] = "cbc(aes)"; static const char key_format_default[] = "default"; static const char key_format_ecryptfs[] = "ecryptfs"; static const char key_format_enc32[] = "enc32"; static unsigned int ivsize; static int blksize; #define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1) #define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1) #define KEY_ECRYPTFS_DESC_LEN 16 #define HASH_SIZE SHA256_DIGEST_SIZE #define MAX_DATA_SIZE 4096 #define MIN_DATA_SIZE 20 #define KEY_ENC32_PAYLOAD_LEN 32 static struct crypto_shash *hash_tfm; enum { Opt_new, Opt_load, Opt_update, Opt_err }; enum { Opt_default, Opt_ecryptfs, Opt_enc32, Opt_error }; static const match_table_t key_format_tokens = { {Opt_default, "default"}, {Opt_ecryptfs, "ecryptfs"}, {Opt_enc32, "enc32"}, {Opt_error, NULL} }; static const match_table_t key_tokens = { {Opt_new, "new"}, {Opt_load, "load"}, {Opt_update, "update"}, {Opt_err, NULL} }; static bool user_decrypted_data = IS_ENABLED(CONFIG_USER_DECRYPTED_DATA); module_param(user_decrypted_data, bool, 0); MODULE_PARM_DESC(user_decrypted_data, "Allow instantiation of encrypted keys using provided decrypted data"); static int aes_get_sizes(void) { struct crypto_skcipher *tfm; tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { pr_err("encrypted_key: failed to alloc_cipher (%ld)\n", PTR_ERR(tfm)); return PTR_ERR(tfm); } ivsize = crypto_skcipher_ivsize(tfm); blksize = crypto_skcipher_blocksize(tfm); crypto_free_skcipher(tfm); return 0; } /* * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key * *
The description of a encrypted key with format 'ecryptfs' must contain * exactly 16 hexadecimal characters. * */ static int valid_ecryptfs_desc(const char *ecryptfs_desc) { int i; if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) { pr_err("encrypted_key: key description must be %d hexadecimal " "characters long\n", KEY_ECRYPTFS_DESC_LEN); return -EINVAL; } for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) { if (!isxdigit(ecryptfs_desc[i])) { pr_err("encrypted_key: key description must contain " "only hexadecimal characters\n"); return -EINVAL; } } return 0; } /* * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key * * key-type:= "trusted:" | "user:" * desc:= master-key description * * Verify that 'key-type' is valid and that 'desc' exists. On key update, * only the master key description is permitted to change, not the key-type. * The key-type remains constant. * * On success returns 0, otherwise -EINVAL. */ static int valid_master_desc(const char *new_desc, const char *orig_desc) { int prefix_len; if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) prefix_len = KEY_TRUSTED_PREFIX_LEN; else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) prefix_len = KEY_USER_PREFIX_LEN; else return -EINVAL; if (!new_desc[prefix_len]) return -EINVAL; if (orig_desc && strncmp(new_desc, orig_desc, prefix_len)) return -EINVAL; return 0; } /* * datablob_parse - parse the keyctl data * * datablob format: * new [<format>] <master-key name> <decrypted data length> [<decrypted data>] * load [<format>] <master-key name> <decrypted data length> * <encrypted iv + data> * update <new-master-key name> * * Tokenizes a copy of the keyctl data, returning a pointer to each token, * which is null terminated. * * On success returns 0, otherwise -EINVAL. 
*/ static int datablob_parse(char *datablob, const char **format, char **master_desc, char **decrypted_datalen, char **hex_encoded_iv, char **decrypted_data) { substring_t args[MAX_OPT_ARGS]; int ret = -EINVAL; int key_cmd; int key_format; char *p, *keyword; keyword = strsep(&datablob, " \t"); if (!keyword) { pr_info("encrypted_key: insufficient parameters specified\n"); return ret; } key_cmd = match_token(keyword, key_tokens, args); /* Get optional format: default | ecryptfs */ p = strsep(&datablob, " \t"); if (!p) { pr_err("encrypted_key: insufficient parameters specified\n"); return ret; } key_format = match_token(p, key_format_tokens, args); switch (key_format) { case Opt_ecryptfs: case Opt_enc32: case Opt_default: *format = p; *master_desc = strsep(&datablob, " \t"); break; case Opt_error: *master_desc = p; break; } if (!*master_desc) { pr_info("encrypted_key: master key parameter is missing\n"); goto out; } if (valid_master_desc(*master_desc, NULL) < 0) { pr_info("encrypted_key: master key parameter \'%s\' " "is invalid\n", *master_desc); goto out; } if (decrypted_datalen) { *decrypted_datalen = strsep(&datablob, " \t"); if (!*decrypted_datalen) { pr_info("encrypted_key: keylen parameter is missing\n"); goto out; } } switch (key_cmd) { case Opt_new: if (!decrypted_datalen) { pr_info("encrypted_key: keyword \'%s\' not allowed " "when called from .update method\n", keyword); break; } *decrypted_data = strsep(&datablob, " \t"); ret = 0; break; case Opt_load: if (!decrypted_datalen) { pr_info("encrypted_key: keyword \'%s\' not allowed " "when called from .update method\n", keyword); break; } *hex_encoded_iv = strsep(&datablob, " \t"); if (!*hex_encoded_iv) { pr_info("encrypted_key: hex blob is missing\n"); break; } ret = 0; break; case Opt_update: if (decrypted_datalen) { pr_info("encrypted_key: keyword \'%s\' not allowed " "when called from .instantiate method\n", keyword); break; } ret = 0; break; case Opt_err: pr_info("encrypted_key: keyword \'%s\' not recognized\n", keyword); break; } out: return ret; } /* * datablob_format - format as an ascii string, before copying to userspace */ static char *datablob_format(struct encrypted_key_payload *epayload, size_t asciiblob_len) { char *ascii_buf, *bufp; u8 *iv = epayload->iv; int len; int i; ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL); if (!ascii_buf) goto out; ascii_buf[asciiblob_len] = '\0'; /* copy datablob master_desc and datalen strings */ len = sprintf(ascii_buf, "%s %s %s ", epayload->format, epayload->master_desc, epayload->datalen); /* convert the hex encoded iv, encrypted-data and HMAC to ascii */ bufp = &ascii_buf[len]; for (i = 0; i < (asciiblob_len - len) / 2; i++) bufp = hex_byte_pack(bufp, iv[i]); out: return ascii_buf; } /* * request_user_key - request the user key * * Use a user provided key to encrypt/decrypt an encrypted-key. 
*/ static struct key *request_user_key(const char *master_desc, const u8 **master_key, size_t *master_keylen) { const struct user_key_payload *upayload; struct key *ukey; ukey = request_key(&key_type_user, master_desc, NULL); if (IS_ERR(ukey)) goto error; down_read(&ukey->sem); upayload = user_key_payload_locked(ukey); if (!upayload) { /* key was revoked before we acquired its semaphore */ up_read(&ukey->sem); key_put(ukey); ukey = ERR_PTR(-EKEYREVOKED); goto error; } *master_key = upayload->data; *master_keylen = upayload->datalen; error: return ukey; } static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen, const u8 *buf, unsigned int buflen) { struct crypto_shash *tfm; int err; tfm = crypto_alloc_shash(hmac_alg, 0, 0); if (IS_ERR(tfm)) { pr_err("encrypted_key: can't alloc %s transform: %ld\n", hmac_alg, PTR_ERR(tfm)); return PTR_ERR(tfm); } err = crypto_shash_setkey(tfm, key, keylen); if (!err) err = crypto_shash_tfm_digest(tfm, buf, buflen, digest); crypto_free_shash(tfm); return err; } enum derived_key_type { ENC_KEY, AUTH_KEY }; /* Derive authentication/encryption key from trusted key */ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type, const u8 *master_key, size_t master_keylen) { u8 *derived_buf; unsigned int derived_buf_len; int ret; derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen; if (derived_buf_len < HASH_SIZE) derived_buf_len = HASH_SIZE; derived_buf = kzalloc(derived_buf_len, GFP_KERNEL); if (!derived_buf) return -ENOMEM; if (key_type) strcpy(derived_buf, "AUTH_KEY"); else strcpy(derived_buf, "ENC_KEY"); memcpy(derived_buf + strlen(derived_buf) + 1, master_key, master_keylen); ret = crypto_shash_tfm_digest(hash_tfm, derived_buf, derived_buf_len, derived_key); kfree_sensitive(derived_buf); return ret; } static struct skcipher_request *init_skcipher_req(const u8 *key, unsigned int key_len) { struct skcipher_request *req; struct crypto_skcipher *tfm; int ret; tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { pr_err("encrypted_key: failed to load %s transform (%ld)\n", blkcipher_alg, PTR_ERR(tfm)); return ERR_CAST(tfm); } ret = crypto_skcipher_setkey(tfm, key, key_len); if (ret < 0) { pr_err("encrypted_key: failed to setkey (%d)\n", ret); crypto_free_skcipher(tfm); return ERR_PTR(ret); } req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { pr_err("encrypted_key: failed to allocate request for %s\n", blkcipher_alg); crypto_free_skcipher(tfm); return ERR_PTR(-ENOMEM); } skcipher_request_set_callback(req, 0, NULL, NULL); return req; } static struct key *request_master_key(struct encrypted_key_payload *epayload, const u8 **master_key, size_t *master_keylen) { struct key *mkey = ERR_PTR(-EINVAL); if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { mkey = request_trusted_key(epayload->master_desc + KEY_TRUSTED_PREFIX_LEN, master_key, master_keylen); } else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) { mkey = request_user_key(epayload->master_desc + KEY_USER_PREFIX_LEN, master_key, master_keylen); } else goto out; if (IS_ERR(mkey)) { int ret = PTR_ERR(mkey); if (ret == -ENOTSUPP) pr_info("encrypted_key: key %s not supported", epayload->master_desc); else pr_info("encrypted_key: key %s not found", epayload->master_desc); goto out; } dump_master_key(*master_key, *master_keylen); out: return mkey; } /* Before returning data to userspace, encrypt decrypted data. 
*/ static int derived_key_encrypt(struct encrypted_key_payload *epayload, const u8 *derived_key, unsigned int derived_keylen) { struct scatterlist sg_in[2]; struct scatterlist sg_out[1]; struct crypto_skcipher *tfm; struct skcipher_request *req; unsigned int encrypted_datalen; u8 iv[AES_BLOCK_SIZE]; int ret; encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); req = init_skcipher_req(derived_key, derived_keylen); ret = PTR_ERR(req); if (IS_ERR(req)) goto out; dump_decrypted_data(epayload); sg_init_table(sg_in, 2); sg_set_buf(&sg_in[0], epayload->decrypted_data, epayload->decrypted_datalen); sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0); sg_init_table(sg_out, 1); sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); memcpy(iv, epayload->iv, sizeof(iv)); skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); ret = crypto_skcipher_encrypt(req); tfm = crypto_skcipher_reqtfm(req); skcipher_request_free(req); crypto_free_skcipher(tfm); if (ret < 0) pr_err("encrypted_key: failed to encrypt (%d)\n", ret); else dump_encrypted_data(epayload, encrypted_datalen); out: return ret; } static int datablob_hmac_append(struct encrypted_key_payload *epayload, const u8 *master_key, size_t master_keylen) { u8 derived_key[HASH_SIZE]; u8 *digest; int ret; ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen); if (ret < 0) goto out; digest = epayload->format + epayload->datablob_len; ret = calc_hmac(digest, derived_key, sizeof derived_key, epayload->format, epayload->datablob_len); if (!ret) dump_hmac(NULL, digest, HASH_SIZE); out: memzero_explicit(derived_key, sizeof(derived_key)); return ret; } /* verify HMAC before decrypting encrypted key */ static int datablob_hmac_verify(struct encrypted_key_payload *epayload, const u8 *format, const u8 *master_key, size_t master_keylen) { u8 derived_key[HASH_SIZE]; u8 digest[HASH_SIZE]; int ret; char *p; unsigned short len; ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen); if (ret < 0) goto out; len = epayload->datablob_len; if (!format) { p = epayload->master_desc; len -= strlen(epayload->format) + 1; } else p = epayload->format; ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len); if (ret < 0) goto out; ret = crypto_memneq(digest, epayload->format + epayload->datablob_len, sizeof(digest)); if (ret) { ret = -EINVAL; dump_hmac("datablob", epayload->format + epayload->datablob_len, HASH_SIZE); dump_hmac("calc", digest, HASH_SIZE); } out: memzero_explicit(derived_key, sizeof(derived_key)); return ret; } static int derived_key_decrypt(struct encrypted_key_payload *epayload, const u8 *derived_key, unsigned int derived_keylen) { struct scatterlist sg_in[1]; struct scatterlist sg_out[2]; struct crypto_skcipher *tfm; struct skcipher_request *req; unsigned int encrypted_datalen; u8 iv[AES_BLOCK_SIZE]; u8 *pad; int ret; /* Throwaway buffer to hold the unused zero padding at the end */ pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL); if (!pad) return -ENOMEM; encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); req = init_skcipher_req(derived_key, derived_keylen); ret = PTR_ERR(req); if (IS_ERR(req)) goto out; dump_encrypted_data(epayload, encrypted_datalen); sg_init_table(sg_in, 1); sg_init_table(sg_out, 2); sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen); sg_set_buf(&sg_out[0], epayload->decrypted_data, epayload->decrypted_datalen); sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE); memcpy(iv, epayload->iv, sizeof(iv)); skcipher_request_set_crypt(req, sg_in, 
sg_out, encrypted_datalen, iv); ret = crypto_skcipher_decrypt(req); tfm = crypto_skcipher_reqtfm(req); skcipher_request_free(req); crypto_free_skcipher(tfm); if (ret < 0) goto out; dump_decrypted_data(epayload); out: kfree(pad); return ret; } /* Allocate memory for decrypted key and datablob. */ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key, const char *format, const char *master_desc, const char *datalen, const char *decrypted_data) { struct encrypted_key_payload *epayload = NULL; unsigned short datablob_len; unsigned short decrypted_datalen; unsigned short payload_datalen; unsigned int encrypted_datalen; unsigned int format_len; long dlen; int i; int ret; ret = kstrtol(datalen, 10, &dlen); if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE) return ERR_PTR(-EINVAL); format_len = (!format) ? strlen(key_format_default) : strlen(format); decrypted_datalen = dlen; payload_datalen = decrypted_datalen; if (decrypted_data) { if (!user_decrypted_data) { pr_err("encrypted key: instantiation of keys using provided decrypted data is disabled since CONFIG_USER_DECRYPTED_DATA is set to false\n"); return ERR_PTR(-EINVAL); } if (strlen(decrypted_data) != decrypted_datalen * 2) { pr_err("encrypted key: decrypted data provided does not match decrypted data length provided\n"); return ERR_PTR(-EINVAL); } for (i = 0; i < strlen(decrypted_data); i++) { if (!isxdigit(decrypted_data[i])) { pr_err("encrypted key: decrypted data provided must contain only hexadecimal characters\n"); return ERR_PTR(-EINVAL); } } } if (format) { if (!strcmp(format, key_format_ecryptfs)) { if (dlen != ECRYPTFS_MAX_KEY_BYTES) { pr_err("encrypted_key: keylen for the ecryptfs format must be equal to %d bytes\n", ECRYPTFS_MAX_KEY_BYTES); return ERR_PTR(-EINVAL); } decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES; payload_datalen = sizeof(struct ecryptfs_auth_tok); } else if (!strcmp(format, key_format_enc32)) { if (decrypted_datalen != KEY_ENC32_PAYLOAD_LEN) { pr_err("encrypted_key: enc32 key payload incorrect length: %d\n", decrypted_datalen); return ERR_PTR(-EINVAL); } } } encrypted_datalen = roundup(decrypted_datalen, blksize); datablob_len = format_len + 1 + strlen(master_desc) + 1 + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen; ret = key_payload_reserve(key, payload_datalen + datablob_len + HASH_SIZE + 1); if (ret < 0) return ERR_PTR(ret); epayload = kzalloc(sizeof(*epayload) + payload_datalen + datablob_len + HASH_SIZE + 1, GFP_KERNEL); if (!epayload) return ERR_PTR(-ENOMEM); epayload->payload_datalen = payload_datalen; epayload->decrypted_datalen = decrypted_datalen; epayload->datablob_len = datablob_len; return epayload; } static int encrypted_key_decrypt(struct encrypted_key_payload *epayload, const char *format, const char *hex_encoded_iv) { struct key *mkey; u8 derived_key[HASH_SIZE]; const u8 *master_key; u8 *hmac; const char *hex_encoded_data; unsigned int encrypted_datalen; size_t master_keylen; size_t asciilen; int ret; encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2; if (strlen(hex_encoded_iv) != asciilen) return -EINVAL; hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2; ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize); if (ret < 0) return -EINVAL; ret = hex2bin(epayload->encrypted_data, hex_encoded_data, encrypted_datalen); if (ret < 0) return -EINVAL; hmac = epayload->format + epayload->datablob_len; ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), HASH_SIZE); if (ret < 0) return 
-EINVAL; mkey = request_master_key(epayload, &master_key, &master_keylen); if (IS_ERR(mkey)) return PTR_ERR(mkey); ret = datablob_hmac_verify(epayload, format, master_key, master_keylen); if (ret < 0) { pr_err("encrypted_key: bad hmac (%d)\n", ret); goto out; } ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen); if (ret < 0) goto out; ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key); if (ret < 0) pr_err("encrypted_key: failed to decrypt key (%d)\n", ret); out: up_read(&mkey->sem); key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); return ret; } static void __ekey_init(struct encrypted_key_payload *epayload, const char *format, const char *master_desc, const char *datalen) { unsigned int format_len; format_len = (!format) ? strlen(key_format_default) : strlen(format); epayload->format = epayload->payload_data + epayload->payload_datalen; epayload->master_desc = epayload->format + format_len + 1; epayload->datalen = epayload->master_desc + strlen(master_desc) + 1; epayload->iv = epayload->datalen + strlen(datalen) + 1; epayload->encrypted_data = epayload->iv + ivsize + 1; epayload->decrypted_data = epayload->payload_data; if (!format) memcpy(epayload->format, key_format_default, format_len); else { if (!strcmp(format, key_format_ecryptfs)) epayload->decrypted_data = ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data); memcpy(epayload->format, format, format_len); } memcpy(epayload->master_desc, master_desc, strlen(master_desc)); memcpy(epayload->datalen, datalen, strlen(datalen)); } /* * encrypted_init - initialize an encrypted key * * For a new key, use either a random number or user-provided decrypted data in * case it is provided. A random number is used for the iv in both cases. For * an old key, decrypt the hex encoded data. */ static int encrypted_init(struct encrypted_key_payload *epayload, const char *key_desc, const char *format, const char *master_desc, const char *datalen, const char *hex_encoded_iv, const char *decrypted_data) { int ret = 0; if (format && !strcmp(format, key_format_ecryptfs)) { ret = valid_ecryptfs_desc(key_desc); if (ret < 0) return ret; ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data, key_desc); } __ekey_init(epayload, format, master_desc, datalen); if (hex_encoded_iv) { ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv); } else if (decrypted_data) { get_random_bytes(epayload->iv, ivsize); ret = hex2bin(epayload->decrypted_data, decrypted_data, epayload->decrypted_datalen); } else { get_random_bytes(epayload->iv, ivsize); get_random_bytes(epayload->decrypted_data, epayload->decrypted_datalen); } return ret; } /* * encrypted_instantiate - instantiate an encrypted key * * Instantiates the key: * - by decrypting an existing encrypted datablob, or * - by creating a new encrypted key based on a kernel random number, or * - using provided decrypted data. * * On success, return 0. Otherwise return errno. 
*/ static int encrypted_instantiate(struct key *key, struct key_preparsed_payload *prep) { struct encrypted_key_payload *epayload = NULL; char *datablob = NULL; const char *format = NULL; char *master_desc = NULL; char *decrypted_datalen = NULL; char *hex_encoded_iv = NULL; char *decrypted_data = NULL; size_t datalen = prep->datalen; int ret; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; datablob[datalen] = 0; memcpy(datablob, prep->data, datalen); ret = datablob_parse(datablob, &format, &master_desc, &decrypted_datalen, &hex_encoded_iv, &decrypted_data); if (ret < 0) goto out; epayload = encrypted_key_alloc(key, format, master_desc, decrypted_datalen, decrypted_data); if (IS_ERR(epayload)) { ret = PTR_ERR(epayload); goto out; } ret = encrypted_init(epayload, key->description, format, master_desc, decrypted_datalen, hex_encoded_iv, decrypted_data); if (ret < 0) { kfree_sensitive(epayload); goto out; } rcu_assign_keypointer(key, epayload); out: kfree_sensitive(datablob); return ret; } static void encrypted_rcu_free(struct rcu_head *rcu) { struct encrypted_key_payload *epayload; epayload = container_of(rcu, struct encrypted_key_payload, rcu); kfree_sensitive(epayload); } /* * encrypted_update - update the master key description * * Change the master key description for an existing encrypted key. * The next read will return an encrypted datablob using the new * master key description. * * On success, return 0. Otherwise return errno. */ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) { struct encrypted_key_payload *epayload = key->payload.data[0]; struct encrypted_key_payload *new_epayload; char *buf; char *new_master_desc = NULL; const char *format = NULL; size_t datalen = prep->datalen; int ret = 0; if (key_is_negative(key)) return -ENOKEY; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; buf = kmalloc(datalen + 1, GFP_KERNEL); if (!buf) return -ENOMEM; buf[datalen] = 0; memcpy(buf, prep->data, datalen); ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL, NULL); if (ret < 0) goto out; ret = valid_master_desc(new_master_desc, epayload->master_desc); if (ret < 0) goto out; new_epayload = encrypted_key_alloc(key, epayload->format, new_master_desc, epayload->datalen, NULL); if (IS_ERR(new_epayload)) { ret = PTR_ERR(new_epayload); goto out; } __ekey_init(new_epayload, epayload->format, new_master_desc, epayload->datalen); memcpy(new_epayload->iv, epayload->iv, ivsize); memcpy(new_epayload->payload_data, epayload->payload_data, epayload->payload_datalen); rcu_assign_keypointer(key, new_epayload); call_rcu(&epayload->rcu, encrypted_rcu_free); out: kfree_sensitive(buf); return ret; } /* * encrypted_read - format and copy out the encrypted data * * The resulting datablob format is: * <master-key name> <decrypted data length> <encrypted iv> <encrypted data> * * On success, return to userspace the encrypted key datablob size. 
*/ static long encrypted_read(const struct key *key, char *buffer, size_t buflen) { struct encrypted_key_payload *epayload; struct key *mkey; const u8 *master_key; size_t master_keylen; char derived_key[HASH_SIZE]; char *ascii_buf; size_t asciiblob_len; int ret; epayload = dereference_key_locked(key); /* returns the hex encoded iv, encrypted-data, and hmac as ascii */ asciiblob_len = epayload->datablob_len + ivsize + 1 + roundup(epayload->decrypted_datalen, blksize) + (HASH_SIZE * 2); if (!buffer || buflen < asciiblob_len) return asciiblob_len; mkey = request_master_key(epayload, &master_key, &master_keylen); if (IS_ERR(mkey)) return PTR_ERR(mkey); ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen); if (ret < 0) goto out; ret = derived_key_encrypt(epayload, derived_key, sizeof derived_key); if (ret < 0) goto out; ret = datablob_hmac_append(epayload, master_key, master_keylen); if (ret < 0) goto out; ascii_buf = datablob_format(epayload, asciiblob_len); if (!ascii_buf) { ret = -ENOMEM; goto out; } up_read(&mkey->sem); key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); memcpy(buffer, ascii_buf, asciiblob_len); kfree_sensitive(ascii_buf); return asciiblob_len; out: up_read(&mkey->sem); key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); return ret; } /* * encrypted_destroy - clear and free the key's payload */ static void encrypted_destroy(struct key *key) { kfree_sensitive(key->payload.data[0]); } struct key_type key_type_encrypted = { .name = "encrypted", .instantiate = encrypted_instantiate, .update = encrypted_update, .destroy = encrypted_destroy, .describe = user_describe, .read = encrypted_read, }; EXPORT_SYMBOL_GPL(key_type_encrypted); static int __init init_encrypted(void) { int ret; hash_tfm = crypto_alloc_shash(hash_alg, 0, 0); if (IS_ERR(hash_tfm)) { pr_err("encrypted_key: can't allocate %s transform: %ld\n", hash_alg, PTR_ERR(hash_tfm)); return PTR_ERR(hash_tfm); } ret = aes_get_sizes(); if (ret < 0) goto out; ret = register_key_type(&key_type_encrypted); if (ret < 0) goto out; return 0; out: crypto_free_shash(hash_tfm); return ret; } static void __exit cleanup_encrypted(void) { crypto_free_shash(hash_tfm); unregister_key_type(&key_type_encrypted); } late_initcall(init_encrypted); module_exit(cleanup_encrypted); MODULE_DESCRIPTION("Encrypted key type"); MODULE_LICENSE("GPL");
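/*
 * Illustrative userspace sketch, not part of encrypted.c: instantiating
 * an "encrypted" key with the datablob grammar handled by
 * datablob_parse() above ("new [<format>] <master-key name>
 * <decrypted data length>").  Assumes libkeyutils' add_key(); the key
 * description "example-ekey" and the user-type master key "kmk" are
 * hypothetical, and "kmk" must already exist in the keyring.
 */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	/* default format, 32-byte payload bound to master key user:kmk */
	const char payload[] = "new user:kmk 32";
	key_serial_t key;

	key = add_key("encrypted", "example-ekey", payload,
		      sizeof(payload) - 1, KEY_SPEC_USER_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	printf("encrypted key serial: %d\n", key);
	return 0;
}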
// SPDX-License-Identifier: GPL-2.0 /* Bluetooth HCI driver model support. */ #include <linux/module.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> static const struct class bt_class = { .name = "bluetooth", }; static void bt_link_release(struct device *dev) { struct hci_conn *conn = to_hci_conn(dev); kfree(conn); } static const struct device_type bt_link = { .name = "link", .release = bt_link_release, }; void hci_conn_init_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; bt_dev_dbg(hdev, "conn %p", conn); conn->dev.type = &bt_link; conn->dev.class = &bt_class; conn->dev.parent = &hdev->dev; device_initialize(&conn->dev); } void hci_conn_add_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; bt_dev_dbg(hdev, "conn %p", conn); if (device_is_registered(&conn->dev)) return; dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); if (device_add(&conn->dev) < 0) bt_dev_err(hdev, "failed to register connection device"); } void hci_conn_del_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; bt_dev_dbg(hdev, "conn %p", conn); if (!device_is_registered(&conn->dev)) { /* If device_add() has *not* succeeded, use *only* put_device() * to drop the reference count. */ put_device(&conn->dev); return; } /* If there are devices using the connection as parent reset it to NULL * before unregistering the device. */ while (1) { struct device *dev; dev = device_find_any_child(&conn->dev); if (!dev) break; device_move(dev, NULL, DPM_ORDER_DEV_LAST); put_device(dev); } device_unregister(&conn->dev); } static void bt_host_release(struct device *dev) { struct hci_dev *hdev = to_hci_dev(dev); if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) hci_release_dev(hdev); else kfree(hdev); module_put(THIS_MODULE); } static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hci_dev *hdev = to_hci_dev(dev); if (hdev->reset) hdev->reset(hdev); return count; } static DEVICE_ATTR_WO(reset); static struct attribute *bt_host_attrs[] = { &dev_attr_reset.attr, NULL, }; ATTRIBUTE_GROUPS(bt_host); static const struct device_type bt_host = { .name = "host", .release = bt_host_release, .groups = bt_host_groups, }; void hci_init_sysfs(struct hci_dev *hdev) { struct device *dev = &hdev->dev; dev->type = &bt_host; dev->class = &bt_class; __module_get(THIS_MODULE); device_initialize(dev); } int __init bt_sysfs_init(void) { return class_register(&bt_class); } void bt_sysfs_cleanup(void) { class_unregister(&bt_class); }
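/*
 * Illustrative userspace sketch, not part of hci_sysfs.c: writing the
 * write-only "reset" attribute installed through bt_host_groups above.
 * The adapter name "hci0" is a hypothetical example; a successful write
 * lands in reset_store() and calls hdev->reset() when the driver
 * provides one.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/bluetooth/hci0/reset", O_WRONLY);

	if (fd < 0) {
		perror("open reset attribute");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}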
// SPDX-License-Identifier: GPL-2.0-or-later /* * NetLabel CIPSO/IPv4 Support * * This file defines the CIPSO/IPv4 functions for the NetLabel system. The * NetLabel system manages static and dynamic label mappings for network * protocols such as CIPSO and RIPSO.
* * Author: Paul Moore <paul@paul-moore.com> */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006 */ #include <linux/types.h> #include <linux/socket.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/sock.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/atomic.h> #include "netlabel_user.h" #include "netlabel_cipso_v4.h" #include "netlabel_mgmt.h" #include "netlabel_domainhash.h" /* Argument struct for cipso_v4_doi_walk() */ struct netlbl_cipsov4_doiwalk_arg { struct netlink_callback *nl_cb; struct sk_buff *skb; u32 seq; }; /* Argument struct for netlbl_domhsh_walk() */ struct netlbl_domhsh_walk_arg { struct netlbl_audit *audit_info; u32 doi; }; /* NetLabel Generic NETLINK CIPSOv4 family */ static struct genl_family netlbl_cipsov4_gnl_family; /* NetLabel Netlink attribute policy */ static const struct nla_policy netlbl_cipsov4_genl_policy[NLBL_CIPSOV4_A_MAX + 1] = { [NLBL_CIPSOV4_A_DOI] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MTYPE] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_TAG] = { .type = NLA_U8 }, [NLBL_CIPSOV4_A_TAGLST] = { .type = NLA_NESTED }, [NLBL_CIPSOV4_A_MLSLVLLOC] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MLSLVLREM] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MLSLVL] = { .type = NLA_NESTED }, [NLBL_CIPSOV4_A_MLSLVLLST] = { .type = NLA_NESTED }, [NLBL_CIPSOV4_A_MLSCATLOC] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MLSCATREM] = { .type = NLA_U32 }, [NLBL_CIPSOV4_A_MLSCAT] = { .type = NLA_NESTED }, [NLBL_CIPSOV4_A_MLSCATLST] = { .type = NLA_NESTED }, }; /* * Helper Functions */ /** * netlbl_cipsov4_add_common - Parse the common sections of a ADD message * @info: the Generic NETLINK info block * @doi_def: the CIPSO V4 DOI definition * * Description: * Parse the common sections of a ADD message and fill in the related values * in @doi_def. Returns zero on success, negative values on failure. * */ static int netlbl_cipsov4_add_common(struct genl_info *info, struct cipso_v4_doi *doi_def) { struct nlattr *nla; int nla_rem; u32 iter = 0; doi_def->doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); if (nla_validate_nested_deprecated(info->attrs[NLBL_CIPSOV4_A_TAGLST], NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy, NULL) != 0) return -EINVAL; nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem) if (nla_type(nla) == NLBL_CIPSOV4_A_TAG) { if (iter >= CIPSO_V4_TAG_MAXCNT) return -EINVAL; doi_def->tags[iter++] = nla_get_u8(nla); } while (iter < CIPSO_V4_TAG_MAXCNT) doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID; return 0; } /* * NetLabel Command Handlers */ /** * netlbl_cipsov4_add_std - Adds a CIPSO V4 DOI definition * @info: the Generic NETLINK info block * @audit_info: NetLabel audit information * * Description: * Create a new CIPSO_V4_MAP_TRANS DOI definition based on the given ADD * message and add it to the CIPSO V4 engine. Return zero on success and * non-zero on error. 
* */ static int netlbl_cipsov4_add_std(struct genl_info *info, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; struct cipso_v4_doi *doi_def = NULL; struct nlattr *nla_a; struct nlattr *nla_b; int nla_a_rem; int nla_b_rem; u32 iter; if (!info->attrs[NLBL_CIPSOV4_A_TAGLST] || !info->attrs[NLBL_CIPSOV4_A_MLSLVLLST]) return -EINVAL; if (nla_validate_nested_deprecated(info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy, NULL) != 0) return -EINVAL; doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL); if (doi_def == NULL) return -ENOMEM; doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL); if (doi_def->map.std == NULL) { kfree(doi_def); return -ENOMEM; } doi_def->type = CIPSO_V4_MAP_TRANS; ret_val = netlbl_cipsov4_add_common(info, doi_def); if (ret_val != 0) goto add_std_failure; ret_val = -EINVAL; nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], nla_a_rem) if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSLVL) { if (nla_validate_nested_deprecated(nla_a, NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy, NULL) != 0) goto add_std_failure; nla_for_each_nested(nla_b, nla_a, nla_b_rem) switch (nla_type(nla_b)) { case NLBL_CIPSOV4_A_MLSLVLLOC: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_LOC_LVLS) goto add_std_failure; if (nla_get_u32(nla_b) >= doi_def->map.std->lvl.local_size) doi_def->map.std->lvl.local_size = nla_get_u32(nla_b) + 1; break; case NLBL_CIPSOV4_A_MLSLVLREM: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_REM_LVLS) goto add_std_failure; if (nla_get_u32(nla_b) >= doi_def->map.std->lvl.cipso_size) doi_def->map.std->lvl.cipso_size = nla_get_u32(nla_b) + 1; break; } } doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size, sizeof(u32), GFP_KERNEL | __GFP_NOWARN); if (doi_def->map.std->lvl.local == NULL) { ret_val = -ENOMEM; goto add_std_failure; } doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size, sizeof(u32), GFP_KERNEL | __GFP_NOWARN); if (doi_def->map.std->lvl.cipso == NULL) { ret_val = -ENOMEM; goto add_std_failure; } for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++) doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL; for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++) doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL; nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], nla_a_rem) if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSLVL) { struct nlattr *lvl_loc; struct nlattr *lvl_rem; lvl_loc = nla_find_nested(nla_a, NLBL_CIPSOV4_A_MLSLVLLOC); lvl_rem = nla_find_nested(nla_a, NLBL_CIPSOV4_A_MLSLVLREM); if (lvl_loc == NULL || lvl_rem == NULL) goto add_std_failure; doi_def->map.std->lvl.local[nla_get_u32(lvl_loc)] = nla_get_u32(lvl_rem); doi_def->map.std->lvl.cipso[nla_get_u32(lvl_rem)] = nla_get_u32(lvl_loc); } if (info->attrs[NLBL_CIPSOV4_A_MLSCATLST]) { if (nla_validate_nested_deprecated(info->attrs[NLBL_CIPSOV4_A_MLSCATLST], NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy, NULL) != 0) goto add_std_failure; nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSCATLST], nla_a_rem) if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSCAT) { if (nla_validate_nested_deprecated(nla_a, NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy, NULL) != 0) goto add_std_failure; nla_for_each_nested(nla_b, nla_a, nla_b_rem) switch (nla_type(nla_b)) { case NLBL_CIPSOV4_A_MLSCATLOC: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_LOC_CATS) goto add_std_failure; if (nla_get_u32(nla_b) >= doi_def->map.std->cat.local_size) doi_def->map.std->cat.local_size = nla_get_u32(nla_b) + 1; break; case NLBL_CIPSOV4_A_MLSCATREM: 
if (nla_get_u32(nla_b) > CIPSO_V4_MAX_REM_CATS) goto add_std_failure; if (nla_get_u32(nla_b) >= doi_def->map.std->cat.cipso_size) doi_def->map.std->cat.cipso_size = nla_get_u32(nla_b) + 1; break; } } doi_def->map.std->cat.local = kcalloc( doi_def->map.std->cat.local_size, sizeof(u32), GFP_KERNEL | __GFP_NOWARN); if (doi_def->map.std->cat.local == NULL) { ret_val = -ENOMEM; goto add_std_failure; } doi_def->map.std->cat.cipso = kcalloc( doi_def->map.std->cat.cipso_size, sizeof(u32), GFP_KERNEL | __GFP_NOWARN); if (doi_def->map.std->cat.cipso == NULL) { ret_val = -ENOMEM; goto add_std_failure; } for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++) doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT; for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++) doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT; nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSCATLST], nla_a_rem) if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSCAT) { struct nlattr *cat_loc; struct nlattr *cat_rem; cat_loc = nla_find_nested(nla_a, NLBL_CIPSOV4_A_MLSCATLOC); cat_rem = nla_find_nested(nla_a, NLBL_CIPSOV4_A_MLSCATREM); if (cat_loc == NULL || cat_rem == NULL) goto add_std_failure; doi_def->map.std->cat.local[ nla_get_u32(cat_loc)] = nla_get_u32(cat_rem); doi_def->map.std->cat.cipso[ nla_get_u32(cat_rem)] = nla_get_u32(cat_loc); } } ret_val = cipso_v4_doi_add(doi_def, audit_info); if (ret_val != 0) goto add_std_failure; return 0; add_std_failure: cipso_v4_doi_free(doi_def); return ret_val; } /** * netlbl_cipsov4_add_pass - Adds a CIPSO V4 DOI definition * @info: the Generic NETLINK info block * @audit_info: NetLabel audit information * * Description: * Create a new CIPSO_V4_MAP_PASS DOI definition based on the given ADD message * and add it to the CIPSO V4 engine. Return zero on success and non-zero on * error. * */ static int netlbl_cipsov4_add_pass(struct genl_info *info, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def = NULL; if (!info->attrs[NLBL_CIPSOV4_A_TAGLST]) return -EINVAL; doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL); if (doi_def == NULL) return -ENOMEM; doi_def->type = CIPSO_V4_MAP_PASS; ret_val = netlbl_cipsov4_add_common(info, doi_def); if (ret_val != 0) goto add_pass_failure; ret_val = cipso_v4_doi_add(doi_def, audit_info); if (ret_val != 0) goto add_pass_failure; return 0; add_pass_failure: cipso_v4_doi_free(doi_def); return ret_val; } /** * netlbl_cipsov4_add_local - Adds a CIPSO V4 DOI definition * @info: the Generic NETLINK info block * @audit_info: NetLabel audit information * * Description: * Create a new CIPSO_V4_MAP_LOCAL DOI definition based on the given ADD * message and add it to the CIPSO V4 engine. Return zero on success and * non-zero on error. 
* */ static int netlbl_cipsov4_add_local(struct genl_info *info, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def = NULL; if (!info->attrs[NLBL_CIPSOV4_A_TAGLST]) return -EINVAL; doi_def = kmalloc(sizeof(*doi_def), GFP_KERNEL); if (doi_def == NULL) return -ENOMEM; doi_def->type = CIPSO_V4_MAP_LOCAL; ret_val = netlbl_cipsov4_add_common(info, doi_def); if (ret_val != 0) goto add_local_failure; ret_val = cipso_v4_doi_add(doi_def, audit_info); if (ret_val != 0) goto add_local_failure; return 0; add_local_failure: cipso_v4_doi_free(doi_def); return ret_val; } /** * netlbl_cipsov4_add - Handle an ADD message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Create a new DOI definition based on the given ADD message and add it to the * CIPSO V4 engine. Returns zero on success, negative values on failure. * */ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info) { int ret_val = -EINVAL; struct netlbl_audit audit_info; if (!info->attrs[NLBL_CIPSOV4_A_DOI] || !info->attrs[NLBL_CIPSOV4_A_MTYPE]) return -EINVAL; netlbl_netlink_auditinfo(&audit_info); switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) { case CIPSO_V4_MAP_TRANS: ret_val = netlbl_cipsov4_add_std(info, &audit_info); break; case CIPSO_V4_MAP_PASS: ret_val = netlbl_cipsov4_add_pass(info, &audit_info); break; case CIPSO_V4_MAP_LOCAL: ret_val = netlbl_cipsov4_add_local(info, &audit_info); break; } if (ret_val == 0) atomic_inc(&netlabel_mgmt_protocount); return ret_val; } /** * netlbl_cipsov4_list - Handle a LIST message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated LIST message and respond accordingly. While the * response message generated by the kernel is straightforward, determining * before hand the size of the buffer to allocate is not (we have to generate * the message to know the size). In order to keep this function sane what we * do is allocate a buffer of NLMSG_GOODSIZE and try to fit the response in * that size, if we fail then we restart with a larger buffer and try again. * We continue in this manner until we hit a limit of failed attempts then we * give up and just send an error message. Returns zero on success and * negative values on error. 
* */ static int netlbl_cipsov4_list(struct sk_buff *skb, struct genl_info *info) { int ret_val; struct sk_buff *ans_skb = NULL; u32 nlsze_mult = 1; void *data; u32 doi; struct nlattr *nla_a; struct nlattr *nla_b; struct cipso_v4_doi *doi_def; u32 iter; if (!info->attrs[NLBL_CIPSOV4_A_DOI]) { ret_val = -EINVAL; goto list_failure; } list_start: ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE * nlsze_mult, GFP_KERNEL); if (ans_skb == NULL) { ret_val = -ENOMEM; goto list_failure; } data = genlmsg_put_reply(ans_skb, info, &netlbl_cipsov4_gnl_family, 0, NLBL_CIPSOV4_C_LIST); if (data == NULL) { ret_val = -ENOMEM; goto list_failure; } doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); rcu_read_lock(); doi_def = cipso_v4_doi_getdef(doi); if (doi_def == NULL) { ret_val = -EINVAL; goto list_failure_lock; } ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MTYPE, doi_def->type); if (ret_val != 0) goto list_failure_lock; nla_a = nla_nest_start_noflag(ans_skb, NLBL_CIPSOV4_A_TAGLST); if (nla_a == NULL) { ret_val = -ENOMEM; goto list_failure_lock; } for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID; iter++) { ret_val = nla_put_u8(ans_skb, NLBL_CIPSOV4_A_TAG, doi_def->tags[iter]); if (ret_val != 0) goto list_failure_lock; } nla_nest_end(ans_skb, nla_a); switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: nla_a = nla_nest_start_noflag(ans_skb, NLBL_CIPSOV4_A_MLSLVLLST); if (nla_a == NULL) { ret_val = -ENOMEM; goto list_failure_lock; } for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++) { if (doi_def->map.std->lvl.local[iter] == CIPSO_V4_INV_LVL) continue; nla_b = nla_nest_start_noflag(ans_skb, NLBL_CIPSOV4_A_MLSLVL); if (nla_b == NULL) { ret_val = -ENOMEM; goto list_retry; } ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MLSLVLLOC, iter); if (ret_val != 0) goto list_retry; ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MLSLVLREM, doi_def->map.std->lvl.local[iter]); if (ret_val != 0) goto list_retry; nla_nest_end(ans_skb, nla_b); } nla_nest_end(ans_skb, nla_a); nla_a = nla_nest_start_noflag(ans_skb, NLBL_CIPSOV4_A_MLSCATLST); if (nla_a == NULL) { ret_val = -ENOMEM; goto list_retry; } for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++) { if (doi_def->map.std->cat.local[iter] == CIPSO_V4_INV_CAT) continue; nla_b = nla_nest_start_noflag(ans_skb, NLBL_CIPSOV4_A_MLSCAT); if (nla_b == NULL) { ret_val = -ENOMEM; goto list_retry; } ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MLSCATLOC, iter); if (ret_val != 0) goto list_retry; ret_val = nla_put_u32(ans_skb, NLBL_CIPSOV4_A_MLSCATREM, doi_def->map.std->cat.local[iter]); if (ret_val != 0) goto list_retry; nla_nest_end(ans_skb, nla_b); } nla_nest_end(ans_skb, nla_a); break; } cipso_v4_doi_putdef(doi_def); rcu_read_unlock(); genlmsg_end(ans_skb, data); return genlmsg_reply(ans_skb, info); list_retry: /* XXX - this limit is a guesstimate */ if (nlsze_mult < 4) { cipso_v4_doi_putdef(doi_def); rcu_read_unlock(); kfree_skb(ans_skb); nlsze_mult *= 2; goto list_start; } list_failure_lock: cipso_v4_doi_putdef(doi_def); rcu_read_unlock(); list_failure: kfree_skb(ans_skb); return ret_val; } /** * netlbl_cipsov4_listall_cb - cipso_v4_doi_walk() callback for LISTALL * @doi_def: the CIPSOv4 DOI definition * @arg: the netlbl_cipsov4_doiwalk_arg structure * * Description: * This function is designed to be used as a callback to the * cipso_v4_doi_walk() function for use in generating a response for a LISTALL * message. Returns the size of the message on success, negative values on * failure. 
* */ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg) { int ret_val = -ENOMEM; struct netlbl_cipsov4_doiwalk_arg *cb_arg = arg; void *data; data = genlmsg_put(cb_arg->skb, NETLINK_CB(cb_arg->nl_cb->skb).portid, cb_arg->seq, &netlbl_cipsov4_gnl_family, NLM_F_MULTI, NLBL_CIPSOV4_C_LISTALL); if (data == NULL) goto listall_cb_failure; ret_val = nla_put_u32(cb_arg->skb, NLBL_CIPSOV4_A_DOI, doi_def->doi); if (ret_val != 0) goto listall_cb_failure; ret_val = nla_put_u32(cb_arg->skb, NLBL_CIPSOV4_A_MTYPE, doi_def->type); if (ret_val != 0) goto listall_cb_failure; genlmsg_end(cb_arg->skb, data); return 0; listall_cb_failure: genlmsg_cancel(cb_arg->skb, data); return ret_val; } /** * netlbl_cipsov4_listall - Handle a LISTALL message * @skb: the NETLINK buffer * @cb: the NETLINK callback * * Description: * Process a user generated LISTALL message and respond accordingly. Returns * zero on success and negative values on error. * */ static int netlbl_cipsov4_listall(struct sk_buff *skb, struct netlink_callback *cb) { struct netlbl_cipsov4_doiwalk_arg cb_arg; u32 doi_skip = cb->args[0]; cb_arg.nl_cb = cb; cb_arg.skb = skb; cb_arg.seq = cb->nlh->nlmsg_seq; cipso_v4_doi_walk(&doi_skip, netlbl_cipsov4_listall_cb, &cb_arg); cb->args[0] = doi_skip; return skb->len; } /** * netlbl_cipsov4_remove_cb - netlbl_cipsov4_remove() callback for REMOVE * @entry: LSM domain mapping entry * @arg: the netlbl_domhsh_walk_arg structure * * Description: * This function is intended for use by netlbl_cipsov4_remove() as the callback * for the netlbl_domhsh_walk() function; it removes LSM domain map entries * which are associated with the CIPSO DOI specified in @arg. Returns zero on * success, negative values on failure. * */ static int netlbl_cipsov4_remove_cb(struct netlbl_dom_map *entry, void *arg) { struct netlbl_domhsh_walk_arg *cb_arg = arg; if (entry->def.type == NETLBL_NLTYPE_CIPSOV4 && entry->def.cipso->doi == cb_arg->doi) return netlbl_domhsh_remove_entry(entry, cb_arg->audit_info); return 0; } /** * netlbl_cipsov4_remove - Handle a REMOVE message * @skb: the NETLINK buffer * @info: the Generic NETLINK info block * * Description: * Process a user generated REMOVE message and respond accordingly. Returns * zero on success, negative values on failure. 
* */ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info) { int ret_val = -EINVAL; struct netlbl_domhsh_walk_arg cb_arg; struct netlbl_audit audit_info; u32 skip_bkt = 0; u32 skip_chain = 0; if (!info->attrs[NLBL_CIPSOV4_A_DOI]) return -EINVAL; netlbl_netlink_auditinfo(&audit_info); cb_arg.doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); cb_arg.audit_info = &audit_info; ret_val = netlbl_domhsh_walk(&skip_bkt, &skip_chain, netlbl_cipsov4_remove_cb, &cb_arg); if (ret_val == 0 || ret_val == -ENOENT) { ret_val = cipso_v4_doi_remove(cb_arg.doi, &audit_info); if (ret_val == 0) atomic_dec(&netlabel_mgmt_protocount); } return ret_val; } /* * NetLabel Generic NETLINK Command Definitions */ static const struct genl_small_ops netlbl_cipsov4_ops[] = { { .cmd = NLBL_CIPSOV4_C_ADD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = netlbl_cipsov4_add, .dumpit = NULL, }, { .cmd = NLBL_CIPSOV4_C_REMOVE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = netlbl_cipsov4_remove, .dumpit = NULL, }, { .cmd = NLBL_CIPSOV4_C_LIST, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = 0, .doit = netlbl_cipsov4_list, .dumpit = NULL, }, { .cmd = NLBL_CIPSOV4_C_LISTALL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = 0, .doit = NULL, .dumpit = netlbl_cipsov4_listall, }, }; static struct genl_family netlbl_cipsov4_gnl_family __ro_after_init = { .hdrsize = 0, .name = NETLBL_NLTYPE_CIPSOV4_NAME, .version = NETLBL_PROTO_VERSION, .maxattr = NLBL_CIPSOV4_A_MAX, .policy = netlbl_cipsov4_genl_policy, .module = THIS_MODULE, .small_ops = netlbl_cipsov4_ops, .n_small_ops = ARRAY_SIZE(netlbl_cipsov4_ops), .resv_start_op = NLBL_CIPSOV4_C_LISTALL + 1, }; /* * NetLabel Generic NETLINK Protocol Functions */ /** * netlbl_cipsov4_genl_init - Register the CIPSOv4 NetLabel component * * Description: * Register the CIPSOv4 packet NetLabel component with the Generic NETLINK * mechanism. Returns zero on success, negative values on failure. * */ int __init netlbl_cipsov4_genl_init(void) { return genl_register_family(&netlbl_cipsov4_gnl_family); }
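The netlbl_cipsov4_list() handler above cannot know the size of its reply in advance, so it allocates NLMSG_DEFAULT_SIZE * nlsze_mult and, if the message does not fit, frees the buffer, doubles the multiplier, and retries up to a small cap (the nlsze_mult < 4 guesstimate at the list_retry label) before giving up. Below is a minimal standalone sketch of that grow-and-retry pattern in plain userspace C; try_serialize() is a hypothetical stand-in for building the netlink reply and is not part of the kernel code above.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical: returns 0 on success, -EMSGSIZE if buf is too small. */
static int try_serialize(char *buf, size_t len)
{
        const char payload[] = "example DOI definition";

        if (len < sizeof(payload))
                return -EMSGSIZE;
        memcpy(buf, payload, sizeof(payload));
        return 0;
}

int main(void)
{
        size_t base = 16;        /* stand-in for NLMSG_DEFAULT_SIZE */
        unsigned int mult = 1;   /* plays the role of nlsze_mult */
        char *buf = NULL;
        int ret;

        do {
                free(buf);
                buf = malloc(base * mult);
                if (!buf)
                        return 1;
                ret = try_serialize(buf, base * mult);
        } while (ret == -EMSGSIZE && (mult *= 2) <= 4); /* bounded retries */

        printf("%s\n", ret ? "gave up" : buf);
        free(buf);
        return 0;
}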
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/netclassid_cgroup.c	Classid Cgroupfs Handling
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/slab.h>
#include <linux/cgroup.h>
#include <linux/fdtable.h>
#include <linux/sched/task.h>

#include <net/cls_cgroup.h>
#include <net/sock.h>

static inline struct cgroup_cls_state *
css_cls_state(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
}

struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
        return css_cls_state(task_css_check(p, net_cls_cgrp_id,
                                            rcu_read_lock_bh_held()));
}
EXPORT_SYMBOL_GPL(task_cls_state);

static struct cgroup_subsys_state *
cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct cgroup_cls_state *cs;

        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);

        return &cs->css;
}

static int cgrp_css_online(struct cgroup_subsys_state *css)
{
        struct cgroup_cls_state *cs = css_cls_state(css);
        struct cgroup_cls_state *parent = css_cls_state(css->parent);

        if (parent)
                cs->classid = parent->classid;

        return 0;
}

static void cgrp_css_free(struct cgroup_subsys_state *css)
{
        kfree(css_cls_state(css));
}

/*
 * To avoid stalling socket creation for tasks with a large number of threads
 * and open sockets, release the file lock every 1000 iterated descriptors.
 * New sockets will already have been created with the new classid.
 */
struct update_classid_context {
        u32 classid;
        unsigned int batch;
};

#define UPDATE_CLASSID_BATCH 1000

static int update_classid_sock(const void *v, struct file *file,
                               unsigned int n)
{
        struct update_classid_context *ctx = (void *)v;
        struct socket *sock = sock_from_file(file);

        if (sock)
                sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
        if (--ctx->batch == 0) {
                ctx->batch = UPDATE_CLASSID_BATCH;
                return n + 1;
        }
        return 0;
}

static void update_classid_task(struct task_struct *p, u32 classid)
{
        struct update_classid_context ctx = {
                .classid = classid,
                .batch = UPDATE_CLASSID_BATCH
        };
        unsigned int fd = 0;

        /* Only update the group leader task; when the task has many threads,
         * this avoids a useless traversal of the others.
         */
        if (p != p->group_leader)
                return;

        do {
                task_lock(p);
                fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
                task_unlock(p);
                cond_resched();
        } while (fd);
}

static void cgrp_attach(struct cgroup_taskset *tset)
{
        struct cgroup_subsys_state *css;
        struct task_struct *p;

        cgroup_taskset_for_each(p, css, tset) {
                update_classid_task(p, css_cls_state(css)->classid);
        }
}

static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
{
        return css_cls_state(css)->classid;
}

static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
                         u64 value)
{
        struct cgroup_cls_state *cs = css_cls_state(css);
        struct css_task_iter it;
        struct task_struct *p;

        cs->classid = (u32)value;

        css_task_iter_start(css, 0, &it);
        while ((p = css_task_iter_next(&it)))
                update_classid_task(p, cs->classid);
        css_task_iter_end(&it);

        return 0;
}

static struct cftype ss_files[] = {
        {
                .name		= "classid",
                .read_u64	= read_classid,
                .write_u64	= write_classid,
        },
        { }	/* terminate */
};

struct cgroup_subsys net_cls_cgrp_subsys = {
        .css_alloc		= cgrp_css_alloc,
        .css_online		= cgrp_css_online,
        .css_free		= cgrp_css_free,
        .attach			= cgrp_attach,
        .legacy_cftypes		= ss_files,
};
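write_classid() above runs when userspace stores a value into a net_cls cgroup's net_cls.classid control file: it records the classid and walks the group's tasks so already-open sockets are retagged. Below is a minimal userspace sketch, assuming a cgroup v1 net_cls hierarchy mounted at /sys/fs/cgroup/net_cls and a group named "example" (both paths are illustrative assumptions); the 0xAAAABBBB value follows the conventional major/minor encoding used with tc's cgroup classifier.

/* Userspace sketch, not part of the kernel source above. */
#include <stdio.h>

int main(void)
{
        /* Assumed mount point and cgroup name, for illustration only. */
        const char *path = "/sys/fs/cgroup/net_cls/example/net_cls.classid";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* 0x0010:0x0001 -> tc class 10:1; stored as a u32 by write_classid(). */
        fprintf(f, "%u\n", 0x00100001u);
        fclose(f);
        return 0;
}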
// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/memcontrol.h>
#include <linux/statfs.h> #include <linux/exportfs.h> #include <asm/ioctls.h> #include "../fsnotify.h" #include "../fdinfo.h" #include "fanotify.h" #define FANOTIFY_DEFAULT_MAX_EVENTS 16384 #define FANOTIFY_OLD_DEFAULT_MAX_MARKS 8192 #define FANOTIFY_DEFAULT_MAX_GROUPS 128 #define FANOTIFY_DEFAULT_FEE_POOL_SIZE 32 /* * Legacy fanotify marks limits (8192) is per group and we introduced a tunable * limit of marks per user, similar to inotify. Effectively, the legacy limit * of fanotify marks per user is <max marks per group> * <max groups per user>. * This default limit (1M) also happens to match the increased limit of inotify * max_user_watches since v5.10. */ #define FANOTIFY_DEFAULT_MAX_USER_MARKS \ (FANOTIFY_OLD_DEFAULT_MAX_MARKS * FANOTIFY_DEFAULT_MAX_GROUPS) /* * Most of the memory cost of adding an inode mark is pinning the marked inode. * The size of the filesystem inode struct is not uniform across filesystems, * so double the size of a VFS inode is used as a conservative approximation. */ #define INODE_MARK_COST (2 * sizeof(struct inode)) /* configurable via /proc/sys/fs/fanotify/ */ static int fanotify_max_queued_events __read_mostly; #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> static long ft_zero = 0; static long ft_int_max = INT_MAX; static const struct ctl_table fanotify_table[] = { { .procname = "max_user_groups", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS], .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &ft_zero, .extra2 = &ft_int_max, }, { .procname = "max_user_marks", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS], .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &ft_zero, .extra2 = &ft_int_max, }, { .procname = "max_queued_events", .data = &fanotify_max_queued_events, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO }, }; static void __init fanotify_sysctls_init(void) { register_sysctl("fs/fanotify", fanotify_table); } #else #define fanotify_sysctls_init() do { } while (0) #endif /* CONFIG_SYSCTL */ /* * All flags that may be specified in parameter event_f_flags of fanotify_init. * * Internal and external open flags are stored together in field f_flags of * struct file. Only external open flags shall be allowed in event_f_flags. * Internal flags like FMODE_EXEC shall be excluded. 
*/ #define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \ O_ACCMODE | O_APPEND | O_NONBLOCK | \ __O_SYNC | O_DSYNC | O_CLOEXEC | \ O_LARGEFILE | O_NOATIME ) extern const struct fsnotify_ops fanotify_fsnotify_ops; struct kmem_cache *fanotify_mark_cache __ro_after_init; struct kmem_cache *fanotify_fid_event_cachep __ro_after_init; struct kmem_cache *fanotify_path_event_cachep __ro_after_init; struct kmem_cache *fanotify_perm_event_cachep __ro_after_init; #define FANOTIFY_EVENT_ALIGN 4 #define FANOTIFY_FID_INFO_HDR_LEN \ (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle)) #define FANOTIFY_PIDFD_INFO_LEN \ sizeof(struct fanotify_event_info_pidfd) #define FANOTIFY_ERROR_INFO_LEN \ (sizeof(struct fanotify_event_info_error)) #define FANOTIFY_RANGE_INFO_LEN \ (sizeof(struct fanotify_event_info_range)) static int fanotify_fid_info_len(int fh_len, int name_len) { int info_len = fh_len; if (name_len) info_len += name_len + 1; return roundup(FANOTIFY_FID_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN); } /* FAN_RENAME may have one or two dir+name info records */ static int fanotify_dir_name_info_len(struct fanotify_event *event) { struct fanotify_info *info = fanotify_event_info(event); int dir_fh_len = fanotify_event_dir_fh_len(event); int dir2_fh_len = fanotify_event_dir2_fh_len(event); int info_len = 0; if (dir_fh_len) info_len += fanotify_fid_info_len(dir_fh_len, info->name_len); if (dir2_fh_len) info_len += fanotify_fid_info_len(dir2_fh_len, info->name2_len); return info_len; } static size_t fanotify_event_len(unsigned int info_mode, struct fanotify_event *event) { size_t event_len = FAN_EVENT_METADATA_LEN; int fh_len; int dot_len = 0; if (fanotify_is_error_event(event->mask)) event_len += FANOTIFY_ERROR_INFO_LEN; if (fanotify_event_has_any_dir_fh(event)) { event_len += fanotify_dir_name_info_len(event); } else if ((info_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_NAME, if name was not recorded in * event on a directory, we will report the name ".". */ dot_len = 1; } if (fanotify_event_has_object_fh(event)) { fh_len = fanotify_event_object_fh_len(event); event_len += fanotify_fid_info_len(fh_len, dot_len); } if (info_mode & FAN_REPORT_PIDFD) event_len += FANOTIFY_PIDFD_INFO_LEN; if (fanotify_event_has_access_range(event)) event_len += FANOTIFY_RANGE_INFO_LEN; return event_len; } /* * Remove an hashed event from merge hash table. */ static void fanotify_unhash_event(struct fsnotify_group *group, struct fanotify_event *event) { assert_spin_locked(&group->notification_lock); pr_debug("%s: group=%p event=%p bucket=%u\n", __func__, group, event, fanotify_event_hash_bucket(group, event)); if (WARN_ON_ONCE(hlist_unhashed(&event->merge_list))) return; hlist_del_init(&event->merge_list); } /* * Get an fanotify notification event if one exists and is small * enough to fit in "count". Return an error pointer if the count * is not large enough. When permission event is dequeued, its state is * updated accordingly. 
*/ static struct fanotify_event *get_one_event(struct fsnotify_group *group, size_t count) { size_t event_size; struct fanotify_event *event = NULL; struct fsnotify_event *fsn_event; unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES); pr_debug("%s: group=%p count=%zd\n", __func__, group, count); spin_lock(&group->notification_lock); fsn_event = fsnotify_peek_first_event(group); if (!fsn_event) goto out; event = FANOTIFY_E(fsn_event); event_size = fanotify_event_len(info_mode, event); if (event_size > count) { event = ERR_PTR(-EINVAL); goto out; } /* * Held the notification_lock the whole time, so this is the * same event we peeked above. */ fsnotify_remove_first_event(group); if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->state = FAN_EVENT_REPORTED; if (fanotify_is_hashed_event(event->mask)) fanotify_unhash_event(group, event); out: spin_unlock(&group->notification_lock); return event; } static int create_fd(struct fsnotify_group *group, const struct path *path, struct file **file) { int client_fd; struct file *new_file; client_fd = get_unused_fd_flags(group->fanotify_data.f_flags); if (client_fd < 0) return client_fd; /* * We provide an fd for the userspace program, so it could access the * file without generating fanotify events itself. */ new_file = dentry_open_nonotify(path, group->fanotify_data.f_flags, current_cred()); if (IS_ERR(new_file)) { put_unused_fd(client_fd); client_fd = PTR_ERR(new_file); } else { *file = new_file; } return client_fd; } static int process_access_response_info(const char __user *info, size_t info_len, struct fanotify_response_info_audit_rule *friar) { if (info_len != sizeof(*friar)) return -EINVAL; if (copy_from_user(friar, info, sizeof(*friar))) return -EFAULT; if (friar->hdr.type != FAN_RESPONSE_INFO_AUDIT_RULE) return -EINVAL; if (friar->hdr.pad != 0) return -EINVAL; if (friar->hdr.len != sizeof(*friar)) return -EINVAL; return info_len; } /* * Finish processing of permission event by setting it to ANSWERED state and * drop group->notification_lock. 
*/ static void finish_permission_event(struct fsnotify_group *group, struct fanotify_perm_event *event, u32 response, struct fanotify_response_info_audit_rule *friar) __releases(&group->notification_lock) { bool destroy = false; assert_spin_locked(&group->notification_lock); event->response = response & ~FAN_INFO; if (response & FAN_INFO) memcpy(&event->audit_rule, friar, sizeof(*friar)); if (event->state == FAN_EVENT_CANCELED) destroy = true; else event->state = FAN_EVENT_ANSWERED; spin_unlock(&group->notification_lock); if (destroy) fsnotify_destroy_event(group, &event->fae.fse); } static int process_access_response(struct fsnotify_group *group, struct fanotify_response *response_struct, const char __user *info, size_t info_len) { struct fanotify_perm_event *event; int fd = response_struct->fd; u32 response = response_struct->response; int errno = fanotify_get_response_errno(response); int ret = info_len; struct fanotify_response_info_audit_rule friar; pr_debug("%s: group=%p fd=%d response=%x errno=%d buf=%p size=%zu\n", __func__, group, fd, response, errno, info, info_len); /* * make sure the response is valid, if invalid we do nothing and either * userspace can send a valid response or we will clean it up after the * timeout */ if (response & ~FANOTIFY_RESPONSE_VALID_MASK) return -EINVAL; switch (response & FANOTIFY_RESPONSE_ACCESS) { case FAN_ALLOW: if (errno) return -EINVAL; break; case FAN_DENY: /* Custom errno is supported only for pre-content groups */ if (errno && group->priority != FSNOTIFY_PRIO_PRE_CONTENT) return -EINVAL; /* * Limit errno to values expected on open(2)/read(2)/write(2) * of regular files. */ switch (errno) { case 0: case EIO: case EPERM: case EBUSY: case ETXTBSY: case EAGAIN: case ENOSPC: case EDQUOT: break; default: return -EINVAL; } break; default: return -EINVAL; } if ((response & FAN_AUDIT) && !FAN_GROUP_FLAG(group, FAN_ENABLE_AUDIT)) return -EINVAL; if (response & FAN_INFO) { ret = process_access_response_info(info, info_len, &friar); if (ret < 0) return ret; if (fd == FAN_NOFD) return ret; } else { ret = 0; } if (fd < 0) return -EINVAL; spin_lock(&group->notification_lock); list_for_each_entry(event, &group->fanotify_data.access_list, fae.fse.list) { if (event->fd != fd) continue; list_del_init(&event->fae.fse.list); finish_permission_event(group, event, response, &friar); wake_up(&group->fanotify_data.access_waitq); return ret; } spin_unlock(&group->notification_lock); return -ENOENT; } static size_t copy_error_info_to_user(struct fanotify_event *event, char __user *buf, int count) { struct fanotify_event_info_error info = { }; struct fanotify_error_event *fee = FANOTIFY_EE(event); info.hdr.info_type = FAN_EVENT_INFO_TYPE_ERROR; info.hdr.len = FANOTIFY_ERROR_INFO_LEN; if (WARN_ON(count < info.hdr.len)) return -EFAULT; info.error = fee->error; info.error_count = fee->err_count; if (copy_to_user(buf, &info, sizeof(info))) return -EFAULT; return info.hdr.len; } static int copy_fid_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, int info_type, const char *name, size_t name_len, char __user *buf, size_t count) { struct fanotify_event_info_fid info = { }; struct file_handle handle = { }; unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh_buf; size_t fh_len = fh ? 
fh->len : 0; size_t info_len = fanotify_fid_info_len(fh_len, name_len); size_t len = info_len; pr_debug("%s: fh_len=%zu name_len=%zu, info_len=%zu, count=%zu\n", __func__, fh_len, name_len, info_len, count); if (WARN_ON_ONCE(len < sizeof(info) || len > count)) return -EFAULT; /* * Copy event info fid header followed by variable sized file handle * and optionally followed by variable sized filename. */ switch (info_type) { case FAN_EVENT_INFO_TYPE_FID: case FAN_EVENT_INFO_TYPE_DFID: if (WARN_ON_ONCE(name_len)) return -EFAULT; break; case FAN_EVENT_INFO_TYPE_DFID_NAME: case FAN_EVENT_INFO_TYPE_OLD_DFID_NAME: case FAN_EVENT_INFO_TYPE_NEW_DFID_NAME: if (WARN_ON_ONCE(!name || !name_len)) return -EFAULT; break; default: return -EFAULT; } info.hdr.info_type = info_type; info.hdr.len = len; info.fsid = *fsid; if (copy_to_user(buf, &info, sizeof(info))) return -EFAULT; buf += sizeof(info); len -= sizeof(info); if (WARN_ON_ONCE(len < sizeof(handle))) return -EFAULT; handle.handle_type = fh->type; handle.handle_bytes = fh_len; /* Mangle handle_type for bad file_handle */ if (!fh_len) handle.handle_type = FILEID_INVALID; if (copy_to_user(buf, &handle, sizeof(handle))) return -EFAULT; buf += sizeof(handle); len -= sizeof(handle); if (WARN_ON_ONCE(len < fh_len)) return -EFAULT; /* * For an inline fh and inline file name, copy through stack to exclude * the copy from usercopy hardening protections. */ fh_buf = fanotify_fh_buf(fh); if (fh_len <= FANOTIFY_INLINE_FH_LEN) { memcpy(bounce, fh_buf, fh_len); fh_buf = bounce; } if (copy_to_user(buf, fh_buf, fh_len)) return -EFAULT; buf += fh_len; len -= fh_len; if (name_len) { /* Copy the filename with terminating null */ name_len++; if (WARN_ON_ONCE(len < name_len)) return -EFAULT; if (copy_to_user(buf, name, name_len)) return -EFAULT; buf += name_len; len -= name_len; } /* Pad with 0's */ WARN_ON_ONCE(len < 0 || len >= FANOTIFY_EVENT_ALIGN); if (len > 0 && clear_user(buf, len)) return -EFAULT; return info_len; } static int copy_pidfd_info_to_user(int pidfd, char __user *buf, size_t count) { struct fanotify_event_info_pidfd info = { }; size_t info_len = FANOTIFY_PIDFD_INFO_LEN; if (WARN_ON_ONCE(info_len > count)) return -EFAULT; info.hdr.info_type = FAN_EVENT_INFO_TYPE_PIDFD; info.hdr.len = info_len; info.pidfd = pidfd; if (copy_to_user(buf, &info, info_len)) return -EFAULT; return info_len; } static size_t copy_range_info_to_user(struct fanotify_event *event, char __user *buf, int count) { struct fanotify_perm_event *pevent = FANOTIFY_PERM(event); struct fanotify_event_info_range info = { }; size_t info_len = FANOTIFY_RANGE_INFO_LEN; if (WARN_ON_ONCE(info_len > count)) return -EFAULT; if (WARN_ON_ONCE(!pevent->ppos)) return -EINVAL; info.hdr.info_type = FAN_EVENT_INFO_TYPE_RANGE; info.hdr.len = info_len; info.offset = *(pevent->ppos); info.count = pevent->count; if (copy_to_user(buf, &info, info_len)) return -EFAULT; return info_len; } static int copy_info_records_to_user(struct fanotify_event *event, struct fanotify_info *info, unsigned int info_mode, int pidfd, char __user *buf, size_t count) { int ret, total_bytes = 0, info_type = 0; unsigned int fid_mode = info_mode & FANOTIFY_FID_BITS; unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD; /* * Event info records order is as follows: * 1. dir fid + name * 2. (optional) new dir fid + new name * 3. (optional) child fid */ if (fanotify_event_has_dir_fh(event)) { info_type = info->name_len ? 
FAN_EVENT_INFO_TYPE_DFID_NAME : FAN_EVENT_INFO_TYPE_DFID; /* FAN_RENAME uses special info types */ if (event->mask & FAN_RENAME) info_type = FAN_EVENT_INFO_TYPE_OLD_DFID_NAME; ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_info_dir_fh(info), info_type, fanotify_info_name(info), info->name_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } /* New dir fid+name may be reported in addition to old dir fid+name */ if (fanotify_event_has_dir2_fh(event)) { info_type = FAN_EVENT_INFO_TYPE_NEW_DFID_NAME; ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_info_dir2_fh(info), info_type, fanotify_info_name2(info), info->name2_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (fanotify_event_has_object_fh(event)) { const char *dot = NULL; int dot_len = 0; if (fid_mode == FAN_REPORT_FID || info_type) { /* * With only group flag FAN_REPORT_FID only type FID is * reported. Second info record type is always FID. */ info_type = FAN_EVENT_INFO_TYPE_FID; } else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_NAME, if name was not * recorded in an event on a directory, report the name * "." with info type DFID_NAME. */ info_type = FAN_EVENT_INFO_TYPE_DFID_NAME; dot = "."; dot_len = 1; } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) || (event->mask & FAN_ONDIR)) { /* * With group flag FAN_REPORT_DIR_FID, a single info * record has type DFID for directory entry modification * event and for event on a directory. */ info_type = FAN_EVENT_INFO_TYPE_DFID; } else { /* * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID, * a single info record has type FID for event on a * non-directory, when there is no directory to report. * For example, on FAN_DELETE_SELF event. 
*/ info_type = FAN_EVENT_INFO_TYPE_FID; } ret = copy_fid_info_to_user(fanotify_event_fsid(event), fanotify_event_object_fh(event), info_type, dot, dot_len, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (pidfd_mode) { ret = copy_pidfd_info_to_user(pidfd, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (fanotify_is_error_event(event->mask)) { ret = copy_error_info_to_user(event, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } if (fanotify_event_has_access_range(event)) { ret = copy_range_info_to_user(event, buf, count); if (ret < 0) return ret; buf += ret; count -= ret; total_bytes += ret; } return total_bytes; } static ssize_t copy_event_to_user(struct fsnotify_group *group, struct fanotify_event *event, char __user *buf, size_t count) { struct fanotify_event_metadata metadata; const struct path *path = fanotify_event_path(event); struct fanotify_info *info = fanotify_event_info(event); unsigned int info_mode = FAN_GROUP_FLAG(group, FANOTIFY_INFO_MODES); unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD; struct file *f = NULL, *pidfd_file = NULL; int ret, pidfd = -ESRCH, fd = -EBADF; pr_debug("%s: group=%p event=%p\n", __func__, group, event); metadata.event_len = fanotify_event_len(info_mode, event); metadata.metadata_len = FAN_EVENT_METADATA_LEN; metadata.vers = FANOTIFY_METADATA_VERSION; metadata.reserved = 0; metadata.mask = event->mask & FANOTIFY_OUTGOING_EVENTS; metadata.pid = pid_vnr(event->pid); /* * For an unprivileged listener, event->pid can be used to identify the * events generated by the listener process itself, without disclosing * the pids of other processes. */ if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && task_tgid(current) != event->pid) metadata.pid = 0; /* * For now, fid mode is required for an unprivileged listener and * fid mode does not report fd in events. Keep this check anyway * for safety in case fid mode requirement is relaxed in the future * to allow unprivileged listener to get events with no fd and no fid. */ if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && path && path->mnt && path->dentry) { fd = create_fd(group, path, &f); /* * Opening an fd from dentry can fail for several reasons. * For example, when tasks are gone and we try to open their * /proc files or we try to open a WRONLY file like in sysfs * or when trying to open a file that was deleted on the * remote network server. * * For a group with FAN_REPORT_FD_ERROR, we will send the * event with the error instead of the open fd, otherwise * Userspace may not get the error at all. * In any case, userspace will not know which file failed to * open, so add a debug print for further investigation. */ if (fd < 0) { pr_debug("fanotify: create_fd(%pd2) failed err=%d\n", path->dentry, fd); if (!FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR)) { /* * Historically, we've handled EOPENSTALE in a * special way and silently dropped such * events. Now we have to keep it to maintain * backward compatibility... */ if (fd == -EOPENSTALE) fd = 0; return fd; } } } if (FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR)) metadata.fd = fd; else metadata.fd = fd >= 0 ? fd : FAN_NOFD; if (pidfd_mode) { /* * Complain if the FAN_REPORT_PIDFD and FAN_REPORT_TID mutual * exclusion is ever lifted. At the time of incoporating pidfd * support within fanotify, the pidfd API only supported the * creation of pidfds for thread-group leaders. 
*/ WARN_ON_ONCE(FAN_GROUP_FLAG(group, FAN_REPORT_TID)); /* * The PIDTYPE_TGID check for an event->pid is performed * preemptively in an attempt to catch out cases where the event * listener reads events after the event generating process has * already terminated. Depending on flag FAN_REPORT_FD_ERROR, * report either -ESRCH or FAN_NOPIDFD to the event listener in * those cases with all other pidfd creation errors reported as * the error code itself or as FAN_EPIDFD. */ if (metadata.pid && pid_has_task(event->pid, PIDTYPE_TGID)) pidfd = pidfd_prepare(event->pid, 0, &pidfd_file); if (!FAN_GROUP_FLAG(group, FAN_REPORT_FD_ERROR) && pidfd < 0) pidfd = pidfd == -ESRCH ? FAN_NOPIDFD : FAN_EPIDFD; } ret = -EFAULT; /* * Sanity check copy size in case get_one_event() and * event_len sizes ever get out of sync. */ if (WARN_ON_ONCE(metadata.event_len > count)) goto out_close_fd; if (copy_to_user(buf, &metadata, FAN_EVENT_METADATA_LEN)) goto out_close_fd; buf += FAN_EVENT_METADATA_LEN; count -= FAN_EVENT_METADATA_LEN; ret = copy_info_records_to_user(event, info, info_mode, pidfd, buf, count); if (ret < 0) goto out_close_fd; if (f) fd_install(fd, f); if (pidfd_file) fd_install(pidfd, pidfd_file); if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->fd = fd; return metadata.event_len; out_close_fd: if (f) { put_unused_fd(fd); fput(f); } if (pidfd_file) { put_unused_fd(pidfd); fput(pidfd_file); } return ret; } /* intofiy userspace file descriptor functions */ static __poll_t fanotify_poll(struct file *file, poll_table *wait) { struct fsnotify_group *group = file->private_data; __poll_t ret = 0; poll_wait(file, &group->notification_waitq, wait); spin_lock(&group->notification_lock); if (!fsnotify_notify_queue_is_empty(group)) ret = EPOLLIN | EPOLLRDNORM; spin_unlock(&group->notification_lock); return ret; } static ssize_t fanotify_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct fsnotify_group *group; struct fanotify_event *event; char __user *start; int ret; DEFINE_WAIT_FUNC(wait, woken_wake_function); start = buf; group = file->private_data; pr_debug("%s: group=%p\n", __func__, group); add_wait_queue(&group->notification_waitq, &wait); while (1) { /* * User can supply arbitrarily large buffer. Avoid softlockups * in case there are lots of available events. */ cond_resched(); event = get_one_event(group, count); if (IS_ERR(event)) { ret = PTR_ERR(event); break; } if (!event) { ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if (signal_pending(current)) break; if (start != buf) break; wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); continue; } ret = copy_event_to_user(group, event, buf, count); /* * Permission events get queued to wait for response. Other * events can be destroyed now. 
*/ if (!fanotify_is_perm_event(event->mask)) { fsnotify_destroy_event(group, &event->fse); } else { if (ret <= 0 || FANOTIFY_PERM(event)->fd < 0) { spin_lock(&group->notification_lock); finish_permission_event(group, FANOTIFY_PERM(event), FAN_DENY, NULL); wake_up(&group->fanotify_data.access_waitq); } else { spin_lock(&group->notification_lock); list_add_tail(&event->fse.list, &group->fanotify_data.access_list); spin_unlock(&group->notification_lock); } } if (ret < 0) break; buf += ret; count -= ret; } remove_wait_queue(&group->notification_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; return ret; } static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct fanotify_response response; struct fsnotify_group *group; int ret; const char __user *info_buf = buf + sizeof(struct fanotify_response); size_t info_len; if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) return -EINVAL; group = file->private_data; pr_debug("%s: group=%p count=%zu\n", __func__, group, count); if (count < sizeof(response)) return -EINVAL; if (copy_from_user(&response, buf, sizeof(response))) return -EFAULT; info_len = count - sizeof(response); ret = process_access_response(group, &response, info_buf, info_len); if (ret < 0) count = ret; else count = sizeof(response) + ret; return count; } static int fanotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; struct fsnotify_event *fsn_event; /* * Stop new events from arriving in the notification queue. since * userspace cannot use fanotify fd anymore, no event can enter or * leave access_list by now either. */ fsnotify_group_stop_queueing(group); /* * Process all permission events on access_list and notification queue * and simulate reply from userspace. */ spin_lock(&group->notification_lock); while (!list_empty(&group->fanotify_data.access_list)) { struct fanotify_perm_event *event; event = list_first_entry(&group->fanotify_data.access_list, struct fanotify_perm_event, fae.fse.list); list_del_init(&event->fae.fse.list); finish_permission_event(group, event, FAN_ALLOW, NULL); spin_lock(&group->notification_lock); } /* * Destroy all non-permission events. For permission events just * dequeue them and set the response. They will be freed once the * response is consumed and fanotify_get_response() returns. 
*/ while ((fsn_event = fsnotify_remove_first_event(group))) { struct fanotify_event *event = FANOTIFY_E(fsn_event); if (!(event->mask & FANOTIFY_PERM_EVENTS)) { spin_unlock(&group->notification_lock); fsnotify_destroy_event(group, fsn_event); } else { finish_permission_event(group, FANOTIFY_PERM(event), FAN_ALLOW, NULL); } spin_lock(&group->notification_lock); } spin_unlock(&group->notification_lock); /* Response for all permission events it set, wakeup waiters */ wake_up(&group->fanotify_data.access_waitq); /* matches the fanotify_init->fsnotify_alloc_group */ fsnotify_destroy_group(group); return 0; } static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fsnotify_group *group; struct fsnotify_event *fsn_event; void __user *p; int ret = -ENOTTY; size_t send_len = 0; group = file->private_data; p = (void __user *) arg; switch (cmd) { case FIONREAD: spin_lock(&group->notification_lock); list_for_each_entry(fsn_event, &group->notification_list, list) send_len += FAN_EVENT_METADATA_LEN; spin_unlock(&group->notification_lock); ret = put_user(send_len, (int __user *) p); break; } return ret; } static const struct file_operations fanotify_fops = { .show_fdinfo = fanotify_show_fdinfo, .poll = fanotify_poll, .read = fanotify_read, .write = fanotify_write, .fasync = NULL, .release = fanotify_release, .unlocked_ioctl = fanotify_ioctl, .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; static int fanotify_find_path(int dfd, const char __user *filename, struct path *path, unsigned int flags, __u64 mask, unsigned int obj_type) { int ret; pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__, dfd, filename, flags); if (filename == NULL) { CLASS(fd, f)(dfd); if (fd_empty(f)) return -EBADF; if ((flags & FAN_MARK_ONLYDIR) && !(S_ISDIR(file_inode(fd_file(f))->i_mode))) return -ENOTDIR; *path = fd_file(f)->f_path; path_get(path); } else { unsigned int lookup_flags = 0; if (!(flags & FAN_MARK_DONT_FOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (flags & FAN_MARK_ONLYDIR) lookup_flags |= LOOKUP_DIRECTORY; ret = user_path_at(dfd, filename, lookup_flags, path); if (ret) goto out; } /* you can only watch an inode if you have read permissions on it */ ret = path_permission(path, MAY_READ); if (ret) { path_put(path); goto out; } ret = security_path_notify(path, mask, obj_type); if (ret) path_put(path); out: return ret; } static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int flags, __u32 umask, int *destroy) { __u32 oldmask, newmask; /* umask bits cannot be removed by user */ mask &= ~umask; spin_lock(&fsn_mark->lock); oldmask = fsnotify_calc_mask(fsn_mark); if (!(flags & FANOTIFY_MARK_IGNORE_BITS)) { fsn_mark->mask &= ~mask; } else { fsn_mark->ignore_mask &= ~mask; } newmask = fsnotify_calc_mask(fsn_mark); /* * We need to keep the mark around even if remaining mask cannot * result in any events (e.g. mask == FAN_ONDIR) to support incremenal * changes to the mask. * Destroy mark when only umask bits remain. 
*/ *destroy = !((fsn_mark->mask | fsn_mark->ignore_mask) & ~umask); spin_unlock(&fsn_mark->lock); return oldmask & ~newmask; } static int fanotify_remove_mark(struct fsnotify_group *group, void *obj, unsigned int obj_type, __u32 mask, unsigned int flags, __u32 umask) { struct fsnotify_mark *fsn_mark = NULL; __u32 removed; int destroy_mark; fsnotify_group_lock(group); fsn_mark = fsnotify_find_mark(obj, obj_type, group); if (!fsn_mark) { fsnotify_group_unlock(group); return -ENOENT; } removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, umask, &destroy_mark); if (removed & fsnotify_conn_mask(fsn_mark->connector)) fsnotify_recalc_mask(fsn_mark->connector); if (destroy_mark) fsnotify_detach_mark(fsn_mark); fsnotify_group_unlock(group); if (destroy_mark) fsnotify_free_mark(fsn_mark); /* matches the fsnotify_find_mark() */ fsnotify_put_mark(fsn_mark); return 0; } static bool fanotify_mark_update_flags(struct fsnotify_mark *fsn_mark, unsigned int fan_flags) { bool want_iref = !(fan_flags & FAN_MARK_EVICTABLE); unsigned int ignore = fan_flags & FANOTIFY_MARK_IGNORE_BITS; bool recalc = false; /* * When using FAN_MARK_IGNORE for the first time, mark starts using * independent event flags in ignore mask. After that, trying to * update the ignore mask with the old FAN_MARK_IGNORED_MASK API * will result in EEXIST error. */ if (ignore == FAN_MARK_IGNORE) fsn_mark->flags |= FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS; /* * Setting FAN_MARK_IGNORED_SURV_MODIFY for the first time may lead to * the removal of the FS_MODIFY bit in calculated mask if it was set * because of an ignore mask that is now going to survive FS_MODIFY. */ if (ignore && (fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) && !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) { fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; if (!(fsn_mark->mask & FS_MODIFY)) recalc = true; } if (fsn_mark->connector->type != FSNOTIFY_OBJ_TYPE_INODE || want_iref == !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) return recalc; /* * NO_IREF may be removed from a mark, but not added. * When removed, fsnotify_recalc_mask() will take the inode ref. */ WARN_ON_ONCE(!want_iref); fsn_mark->flags &= ~FSNOTIFY_MARK_FLAG_NO_IREF; return true; } static bool fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int fan_flags) { bool recalc; spin_lock(&fsn_mark->lock); if (!(fan_flags & FANOTIFY_MARK_IGNORE_BITS)) fsn_mark->mask |= mask; else fsn_mark->ignore_mask |= mask; recalc = fsnotify_calc_mask(fsn_mark) & ~fsnotify_conn_mask(fsn_mark->connector); recalc |= fanotify_mark_update_flags(fsn_mark, fan_flags); spin_unlock(&fsn_mark->lock); return recalc; } struct fan_fsid { struct super_block *sb; __kernel_fsid_t id; bool weak; }; static int fanotify_set_mark_fsid(struct fsnotify_group *group, struct fsnotify_mark *mark, struct fan_fsid *fsid) { struct fsnotify_mark_connector *conn; struct fsnotify_mark *old; struct super_block *old_sb = NULL; FANOTIFY_MARK(mark)->fsid = fsid->id; mark->flags |= FSNOTIFY_MARK_FLAG_HAS_FSID; if (fsid->weak) mark->flags |= FSNOTIFY_MARK_FLAG_WEAK_FSID; /* First mark added will determine if group is single or multi fsid */ if (list_empty(&group->marks_list)) return 0; /* Find sb of an existing mark */ list_for_each_entry(old, &group->marks_list, g_list) { conn = READ_ONCE(old->connector); if (!conn) continue; old_sb = fsnotify_connector_sb(conn); if (old_sb) break; } /* Only detached marks left? 
*/ if (!old_sb) return 0; /* Do not allow mixing of marks with weak and strong fsid */ if ((mark->flags ^ old->flags) & FSNOTIFY_MARK_FLAG_WEAK_FSID) return -EXDEV; /* Allow mixing of marks with strong fsid from different fs */ if (!fsid->weak) return 0; /* Do not allow mixing marks with weak fsid from different fs */ if (old_sb != fsid->sb) return -EXDEV; /* Do not allow mixing marks from different btrfs sub-volumes */ if (!fanotify_fsid_equal(&FANOTIFY_MARK(old)->fsid, &FANOTIFY_MARK(mark)->fsid)) return -EXDEV; return 0; } static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, void *obj, unsigned int obj_type, unsigned int fan_flags, struct fan_fsid *fsid) { struct ucounts *ucounts = group->fanotify_data.ucounts; struct fanotify_mark *fan_mark; struct fsnotify_mark *mark; int ret; /* * Enforce per user marks limits per user in all containing user ns. * A group with FAN_UNLIMITED_MARKS does not contribute to mark count * in the limited groups account. */ if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS) && !inc_ucount(ucounts->ns, ucounts->uid, UCOUNT_FANOTIFY_MARKS)) return ERR_PTR(-ENOSPC); fan_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); if (!fan_mark) { ret = -ENOMEM; goto out_dec_ucounts; } mark = &fan_mark->fsn_mark; fsnotify_init_mark(mark, group); if (fan_flags & FAN_MARK_EVICTABLE) mark->flags |= FSNOTIFY_MARK_FLAG_NO_IREF; /* Cache fsid of filesystem containing the marked object */ if (fsid) { ret = fanotify_set_mark_fsid(group, mark, fsid); if (ret) goto out_put_mark; } else { fan_mark->fsid.val[0] = fan_mark->fsid.val[1] = 0; } ret = fsnotify_add_mark_locked(mark, obj, obj_type, 0); if (ret) goto out_put_mark; return mark; out_put_mark: fsnotify_put_mark(mark); out_dec_ucounts: if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS)) dec_ucount(ucounts, UCOUNT_FANOTIFY_MARKS); return ERR_PTR(ret); } static int fanotify_group_init_error_pool(struct fsnotify_group *group) { if (mempool_initialized(&group->fanotify_data.error_events_pool)) return 0; return mempool_init_kmalloc_pool(&group->fanotify_data.error_events_pool, FANOTIFY_DEFAULT_FEE_POOL_SIZE, sizeof(struct fanotify_error_event)); } static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int fan_flags) { /* * Non evictable mark cannot be downgraded to evictable mark. */ if (fan_flags & FAN_MARK_EVICTABLE && !(fsn_mark->flags & FSNOTIFY_MARK_FLAG_NO_IREF)) return -EEXIST; /* * New ignore mask semantics cannot be downgraded to old semantics. */ if (fan_flags & FAN_MARK_IGNORED_MASK && fsn_mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS) return -EEXIST; /* * An ignore mask that survives modify could never be downgraded to not * survive modify. With new FAN_MARK_IGNORE semantics we make that rule * explicit and return an error when trying to update the ignore mask * without the original FAN_MARK_IGNORED_SURV_MODIFY value. 
*/ if (fan_flags & FAN_MARK_IGNORE && !(fan_flags & FAN_MARK_IGNORED_SURV_MODIFY) && fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY) return -EEXIST; /* For now pre-content events are not generated for directories */ mask |= fsn_mark->mask; if (mask & FANOTIFY_PRE_CONTENT_EVENTS && mask & FAN_ONDIR) return -EEXIST; return 0; } static int fanotify_add_mark(struct fsnotify_group *group, void *obj, unsigned int obj_type, __u32 mask, unsigned int fan_flags, struct fan_fsid *fsid) { struct fsnotify_mark *fsn_mark; bool recalc; int ret = 0; fsnotify_group_lock(group); fsn_mark = fsnotify_find_mark(obj, obj_type, group); if (!fsn_mark) { fsn_mark = fanotify_add_new_mark(group, obj, obj_type, fan_flags, fsid); if (IS_ERR(fsn_mark)) { fsnotify_group_unlock(group); return PTR_ERR(fsn_mark); } } /* * Check if requested mark flags conflict with an existing mark flags. */ ret = fanotify_may_update_existing_mark(fsn_mark, mask, fan_flags); if (ret) goto out; /* * Error events are pre-allocated per group, only if strictly * needed (i.e. FAN_FS_ERROR was requested). */ if (!(fan_flags & FANOTIFY_MARK_IGNORE_BITS) && (mask & FAN_FS_ERROR)) { ret = fanotify_group_init_error_pool(group); if (ret) goto out; } recalc = fanotify_mark_add_to_mask(fsn_mark, mask, fan_flags); if (recalc) fsnotify_recalc_mask(fsn_mark->connector); out: fsnotify_group_unlock(group); fsnotify_put_mark(fsn_mark); return ret; } static struct fsnotify_event *fanotify_alloc_overflow_event(void) { struct fanotify_event *oevent; oevent = kmalloc(sizeof(*oevent), GFP_KERNEL_ACCOUNT); if (!oevent) return NULL; fanotify_init_event(oevent, 0, FS_Q_OVERFLOW); oevent->type = FANOTIFY_EVENT_TYPE_OVERFLOW; return &oevent->fse; } static struct hlist_head *fanotify_alloc_merge_hash(void) { struct hlist_head *hash; hash = kmalloc(sizeof(struct hlist_head) << FANOTIFY_HTABLE_BITS, GFP_KERNEL_ACCOUNT); if (!hash) return NULL; __hash_init(hash, FANOTIFY_HTABLE_SIZE); return hash; } /* fanotify syscalls */ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) { struct fsnotify_group *group; int f_flags, fd; unsigned int fid_mode = flags & FANOTIFY_FID_BITS; unsigned int class = flags & FANOTIFY_CLASS_BITS; unsigned int internal_flags = 0; struct file *file; pr_debug("%s: flags=%x event_f_flags=%x\n", __func__, flags, event_f_flags); if (!capable(CAP_SYS_ADMIN)) { /* * An unprivileged user can setup an fanotify group with * limited functionality - an unprivileged group is limited to * notification events with file handles and it cannot use * unlimited queue/marks. */ if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode) return -EPERM; /* * Setting the internal flag FANOTIFY_UNPRIV on the group * prevents setting mount/filesystem marks on this group and * prevents reporting pid and open fd in events. */ internal_flags |= FANOTIFY_UNPRIV; } #ifdef CONFIG_AUDITSYSCALL if (flags & ~(FANOTIFY_INIT_FLAGS | FAN_ENABLE_AUDIT)) #else if (flags & ~FANOTIFY_INIT_FLAGS) #endif return -EINVAL; /* * A pidfd can only be returned for a thread-group leader; thus * FAN_REPORT_PIDFD and FAN_REPORT_TID need to remain mutually * exclusive. */ if ((flags & FAN_REPORT_PIDFD) && (flags & FAN_REPORT_TID)) return -EINVAL; if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS) return -EINVAL; switch (event_f_flags & O_ACCMODE) { case O_RDONLY: case O_RDWR: case O_WRONLY: break; default: return -EINVAL; } if (fid_mode && class != FAN_CLASS_NOTIF) return -EINVAL; /* * Child name is reported with parent fid so requires dir fid. 
* We can report both child fid and dir fid with or without name. */ if ((fid_mode & FAN_REPORT_NAME) && !(fid_mode & FAN_REPORT_DIR_FID)) return -EINVAL; /* * FAN_REPORT_TARGET_FID requires FAN_REPORT_NAME and FAN_REPORT_FID * and is used as an indication to report both dir and child fid on all * dirent events. */ if ((fid_mode & FAN_REPORT_TARGET_FID) && (!(fid_mode & FAN_REPORT_NAME) || !(fid_mode & FAN_REPORT_FID))) return -EINVAL; f_flags = O_RDWR; if (flags & FAN_CLOEXEC) f_flags |= O_CLOEXEC; if (flags & FAN_NONBLOCK) f_flags |= O_NONBLOCK; /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ group = fsnotify_alloc_group(&fanotify_fsnotify_ops, FSNOTIFY_GROUP_USER); if (IS_ERR(group)) { return PTR_ERR(group); } /* Enforce groups limits per user in all containing user ns */ group->fanotify_data.ucounts = inc_ucount(current_user_ns(), current_euid(), UCOUNT_FANOTIFY_GROUPS); if (!group->fanotify_data.ucounts) { fd = -EMFILE; goto out_destroy_group; } group->fanotify_data.flags = flags | internal_flags; group->memcg = get_mem_cgroup_from_mm(current->mm); group->fanotify_data.merge_hash = fanotify_alloc_merge_hash(); if (!group->fanotify_data.merge_hash) { fd = -ENOMEM; goto out_destroy_group; } group->overflow_event = fanotify_alloc_overflow_event(); if (unlikely(!group->overflow_event)) { fd = -ENOMEM; goto out_destroy_group; } if (force_o_largefile()) event_f_flags |= O_LARGEFILE; group->fanotify_data.f_flags = event_f_flags; init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); switch (class) { case FAN_CLASS_NOTIF: group->priority = FSNOTIFY_PRIO_NORMAL; break; case FAN_CLASS_CONTENT: group->priority = FSNOTIFY_PRIO_CONTENT; break; case FAN_CLASS_PRE_CONTENT: group->priority = FSNOTIFY_PRIO_PRE_CONTENT; break; default: fd = -EINVAL; goto out_destroy_group; } if (flags & FAN_UNLIMITED_QUEUE) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_destroy_group; group->max_events = UINT_MAX; } else { group->max_events = fanotify_max_queued_events; } if (flags & FAN_UNLIMITED_MARKS) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_destroy_group; } if (flags & FAN_ENABLE_AUDIT) { fd = -EPERM; if (!capable(CAP_AUDIT_WRITE)) goto out_destroy_group; } fd = get_unused_fd_flags(f_flags); if (fd < 0) goto out_destroy_group; file = anon_inode_getfile_fmode("[fanotify]", &fanotify_fops, group, f_flags, FMODE_NONOTIFY); if (IS_ERR(file)) { put_unused_fd(fd); fd = PTR_ERR(file); goto out_destroy_group; } fd_install(fd, file); return fd; out_destroy_group: fsnotify_destroy_group(group); return fd; } static int fanotify_test_fsid(struct dentry *dentry, unsigned int flags, struct fan_fsid *fsid) { unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; __kernel_fsid_t root_fsid; int err; /* * Make sure dentry is not of a filesystem with zero fsid (e.g. fuse). */ err = vfs_get_fsid(dentry, &fsid->id); if (err) return err; fsid->sb = dentry->d_sb; if (!fsid->id.val[0] && !fsid->id.val[1]) { err = -ENODEV; goto weak; } /* * Make sure dentry is not of a filesystem subvolume (e.g. btrfs) * which uses a different fsid than sb root. */ err = vfs_get_fsid(dentry->d_sb->s_root, &root_fsid); if (err) return err; if (!fanotify_fsid_equal(&root_fsid, &fsid->id)) { err = -EXDEV; goto weak; } fsid->weak = false; return 0; weak: /* Allow weak fsid when marking inodes */ fsid->weak = true; return (mark_type == FAN_MARK_INODE) ? 
0 : err; } /* Check if filesystem can encode a unique fid */ static int fanotify_test_fid(struct dentry *dentry, unsigned int flags) { unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; const struct export_operations *nop = dentry->d_sb->s_export_op; /* * We need to make sure that the filesystem supports encoding of * file handles so user can use name_to_handle_at() to compare fids * reported with events to the file handle of watched objects. */ if (!exportfs_can_encode_fid(nop)) return -EOPNOTSUPP; /* * For sb/mount mark, we also need to make sure that the filesystem * supports decoding file handles, so user has a way to map back the * reported fids to filesystem objects. */ if (mark_type != FAN_MARK_INODE && !exportfs_can_decode_fh(nop)) return -EOPNOTSUPP; return 0; } static int fanotify_events_supported(struct fsnotify_group *group, const struct path *path, __u64 mask, unsigned int flags) { unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; bool is_dir = d_is_dir(path->dentry); /* Strict validation of events in non-dir inode mask with v5.17+ APIs */ bool strict_dir_events = FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID) || (mask & FAN_RENAME) || (flags & FAN_MARK_IGNORE); /* * Filesystems need to opt-into pre-content evnets (a.k.a HSM) * and they are only supported on regular files and directories. */ if (mask & FANOTIFY_PRE_CONTENT_EVENTS) { if (!(path->mnt->mnt_sb->s_iflags & SB_I_ALLOW_HSM)) return -EOPNOTSUPP; if (!is_dir && !d_is_reg(path->dentry)) return -EINVAL; } /* * Some filesystems such as 'proc' acquire unusual locks when opening * files. For them fanotify permission events have high chances of * deadlocking the system - open done when reporting fanotify event * blocks on this "unusual" lock while another process holding the lock * waits for fanotify permission event to be answered. Just disallow * permission events for such filesystems. */ if (mask & FANOTIFY_PERM_EVENTS && path->mnt->mnt_sb->s_type->fs_flags & FS_DISALLOW_NOTIFY_PERM) return -EINVAL; /* * mount and sb marks are not allowed on kernel internal pseudo fs, * like pipe_mnt, because that would subscribe to events on all the * anonynous pipes in the system. * * SB_NOUSER covers all of the internal pseudo fs whose objects are not * exposed to user's mount namespace, but there are other SB_KERNMOUNT * fs, like nsfs, debugfs, for which the value of allowing sb and mount * mark is questionable. For now we leave them alone. */ if (mark_type != FAN_MARK_INODE && path->mnt->mnt_sb->s_flags & SB_NOUSER) return -EINVAL; /* * We shouldn't have allowed setting dirent events and the directory * flags FAN_ONDIR and FAN_EVENT_ON_CHILD in mask of non-dir inode, * but because we always allowed it, error only when using new APIs. 
*/ if (strict_dir_events && mark_type == FAN_MARK_INODE && !is_dir && (mask & FANOTIFY_DIRONLY_EVENT_BITS)) return -ENOTDIR; return 0; } static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, int dfd, const char __user *pathname) { struct inode *inode = NULL; struct vfsmount *mnt = NULL; struct fsnotify_group *group; struct path path; struct fan_fsid __fsid, *fsid = NULL; u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS; unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; unsigned int mark_cmd = flags & FANOTIFY_MARK_CMD_BITS; unsigned int ignore = flags & FANOTIFY_MARK_IGNORE_BITS; unsigned int obj_type, fid_mode; void *obj; u32 umask = 0; int ret; pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", __func__, fanotify_fd, flags, dfd, pathname, mask); /* we only use the lower 32 bits as of right now. */ if (upper_32_bits(mask)) return -EINVAL; if (flags & ~FANOTIFY_MARK_FLAGS) return -EINVAL; switch (mark_type) { case FAN_MARK_INODE: obj_type = FSNOTIFY_OBJ_TYPE_INODE; break; case FAN_MARK_MOUNT: obj_type = FSNOTIFY_OBJ_TYPE_VFSMOUNT; break; case FAN_MARK_FILESYSTEM: obj_type = FSNOTIFY_OBJ_TYPE_SB; break; default: return -EINVAL; } switch (mark_cmd) { case FAN_MARK_ADD: case FAN_MARK_REMOVE: if (!mask) return -EINVAL; break; case FAN_MARK_FLUSH: if (flags & ~(FANOTIFY_MARK_TYPE_BITS | FAN_MARK_FLUSH)) return -EINVAL; break; default: return -EINVAL; } if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) valid_mask |= FANOTIFY_PERM_EVENTS; if (mask & ~valid_mask) return -EINVAL; /* We don't allow FAN_MARK_IGNORE & FAN_MARK_IGNORED_MASK together */ if (ignore == (FAN_MARK_IGNORE | FAN_MARK_IGNORED_MASK)) return -EINVAL; /* * Event flags (FAN_ONDIR, FAN_EVENT_ON_CHILD) have no effect with * FAN_MARK_IGNORED_MASK. */ if (ignore == FAN_MARK_IGNORED_MASK) { mask &= ~FANOTIFY_EVENT_FLAGS; umask = FANOTIFY_EVENT_FLAGS; } CLASS(fd, f)(fanotify_fd); if (fd_empty(f)) return -EBADF; /* verify that this is indeed an fanotify instance */ if (unlikely(fd_file(f)->f_op != &fanotify_fops)) return -EINVAL; group = fd_file(f)->private_data; /* * An unprivileged user is not allowed to setup mount nor filesystem * marks. This also includes setting up such marks by a group that * was initialized by an unprivileged user. */ if ((!capable(CAP_SYS_ADMIN) || FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) && mark_type != FAN_MARK_INODE) return -EPERM; /* * Permission events are not allowed for FAN_CLASS_NOTIF. * Pre-content permission events are not allowed for FAN_CLASS_CONTENT. */ if (mask & FANOTIFY_PERM_EVENTS && group->priority == FSNOTIFY_PRIO_NORMAL) return -EINVAL; else if (mask & FANOTIFY_PRE_CONTENT_EVENTS && group->priority == FSNOTIFY_PRIO_CONTENT) return -EINVAL; if (mask & FAN_FS_ERROR && mark_type != FAN_MARK_FILESYSTEM) return -EINVAL; /* * Evictable is only relevant for inode marks, because only inode object * can be evicted on memory pressure. */ if (flags & FAN_MARK_EVICTABLE && mark_type != FAN_MARK_INODE) return -EINVAL; /* * Events that do not carry enough information to report * event->fd require a group that supports reporting fid. Those * events are not supported on a mount mark, because they do not * carry enough information (i.e. path) to be filtered by mount * point. */ fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); if (mask & ~(FANOTIFY_FD_EVENTS|FANOTIFY_EVENT_FLAGS) && (!fid_mode || mark_type == FAN_MARK_MOUNT)) return -EINVAL; /* * FAN_RENAME uses special info type records to report the old and * new parent+name. 
Reporting only old and new parent id is less * useful and was not implemented. */ if (mask & FAN_RENAME && !(fid_mode & FAN_REPORT_NAME)) return -EINVAL; /* Pre-content events are not currently generated for directories. */ if (mask & FANOTIFY_PRE_CONTENT_EVENTS && mask & FAN_ONDIR) return -EINVAL; if (mark_cmd == FAN_MARK_FLUSH) { if (mark_type == FAN_MARK_MOUNT) fsnotify_clear_vfsmount_marks_by_group(group); else if (mark_type == FAN_MARK_FILESYSTEM) fsnotify_clear_sb_marks_by_group(group); else fsnotify_clear_inode_marks_by_group(group); return 0; } ret = fanotify_find_path(dfd, pathname, &path, flags, (mask & ALL_FSNOTIFY_EVENTS), obj_type); if (ret) return ret; if (mark_cmd == FAN_MARK_ADD) { ret = fanotify_events_supported(group, &path, mask, flags); if (ret) goto path_put_and_out; } if (fid_mode) { ret = fanotify_test_fsid(path.dentry, flags, &__fsid); if (ret) goto path_put_and_out; ret = fanotify_test_fid(path.dentry, flags); if (ret) goto path_put_and_out; fsid = &__fsid; } /* inode held in place by reference to path; group by fget on fd */ if (mark_type == FAN_MARK_INODE) { inode = path.dentry->d_inode; obj = inode; } else { mnt = path.mnt; if (mark_type == FAN_MARK_MOUNT) obj = mnt; else obj = mnt->mnt_sb; } /* * If some other task has this inode open for write we should not add * an ignore mask, unless that ignore mask is supposed to survive * modification changes anyway. */ if (mark_cmd == FAN_MARK_ADD && (flags & FANOTIFY_MARK_IGNORE_BITS) && !(flags & FAN_MARK_IGNORED_SURV_MODIFY)) { ret = mnt ? -EINVAL : -EISDIR; /* FAN_MARK_IGNORE requires SURV_MODIFY for sb/mount/dir marks */ if (ignore == FAN_MARK_IGNORE && (mnt || S_ISDIR(inode->i_mode))) goto path_put_and_out; ret = 0; if (inode && inode_is_open_for_write(inode)) goto path_put_and_out; } /* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */ if (mnt || !S_ISDIR(inode->i_mode)) { mask &= ~FAN_EVENT_ON_CHILD; umask = FAN_EVENT_ON_CHILD; /* * If group needs to report parent fid, register for getting * events with parent/name info for non-directory. */ if ((fid_mode & FAN_REPORT_DIR_FID) && (flags & FAN_MARK_ADD) && !ignore) mask |= FAN_EVENT_ON_CHILD; } /* create/update an inode mark */ switch (mark_cmd) { case FAN_MARK_ADD: ret = fanotify_add_mark(group, obj, obj_type, mask, flags, fsid); break; case FAN_MARK_REMOVE: ret = fanotify_remove_mark(group, obj, obj_type, mask, flags, umask); break; default: ret = -EINVAL; } path_put_and_out: path_put(&path); return ret; } #ifndef CONFIG_ARCH_SPLIT_ARG64 SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags, __u64, mask, int, dfd, const char __user *, pathname) { return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname); } #endif #if defined(CONFIG_ARCH_SPLIT_ARG64) || defined(CONFIG_COMPAT) SYSCALL32_DEFINE6(fanotify_mark, int, fanotify_fd, unsigned int, flags, SC_ARG64(mask), int, dfd, const char __user *, pathname) { return do_fanotify_mark(fanotify_fd, flags, SC_VAL64(__u64, mask), dfd, pathname); } #endif /* * fanotify_user_setup - Our initialization function. Note that we cannot return * error because we have compiled-in VFS hooks. So an (unlikely) failure here * must result in panic(). */ static int __init fanotify_user_setup(void) { struct sysinfo si; int max_marks; si_meminfo(&si); /* * Allow up to 1% of addressable memory to be accounted for per user * marks limited to the range [8192, 1048576]. 
mount and sb marks are * a lot cheaper than inode marks, but there is no reason for a user * to have many of those, so calculate by the cost of inode marks. */ max_marks = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) / INODE_MARK_COST; max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS, FANOTIFY_DEFAULT_MAX_USER_MARKS); BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 13); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 11); fanotify_mark_cache = KMEM_CACHE(fanotify_mark, SLAB_PANIC|SLAB_ACCOUNT); fanotify_fid_event_cachep = KMEM_CACHE(fanotify_fid_event, SLAB_PANIC); fanotify_path_event_cachep = KMEM_CACHE(fanotify_path_event, SLAB_PANIC); if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) { fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event, SLAB_PANIC); } fanotify_max_queued_events = FANOTIFY_DEFAULT_MAX_EVENTS; init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS] = FANOTIFY_DEFAULT_MAX_GROUPS; init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS] = max_marks; fanotify_sysctls_init(); return 0; } device_initcall(fanotify_user_setup);
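/*
 * Hedged usage sketch, not part of fanotify_user.c above: a minimal
 * userspace consumer of the fanotify_init()/fanotify_mark() syscalls
 * implemented in this file. The watched path "/tmp" and the event mask
 * are illustrative assumptions; a real caller needs CAP_SYS_ADMIN for a
 * FAN_CLASS_NOTIF group that reports open fds.
 */
#include <sys/fanotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t len;

	/* Blocking notification-class group; events carry an open fd. */
	int fan_fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
	if (fan_fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* Watch opens and close-after-write on /tmp and its direct children. */
	if (fanotify_mark(fan_fd, FAN_MARK_ADD,
			  FAN_OPEN | FAN_CLOSE_WRITE | FAN_EVENT_ON_CHILD,
			  AT_FDCWD, "/tmp") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	/* One read() may return several events; walk them with the helpers. */
	len = read(fan_fd, buf, sizeof(buf));
	for (struct fanotify_event_metadata *md = (void *)buf;
	     FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
		printf("mask=0x%llx pid=%d\n",
		       (unsigned long long)md->mask, (int)md->pid);
		if (md->fd >= 0)
			close(md->fd);
	}

	close(fan_fd);
	return 0;
}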
// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/mount.c - kernfs mount implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/seq_file.h>
#include <linux/exportfs.h>
#include <linux/uuid.h>
#include <linux/statfs.h>

#include "kernfs-internal.h"

struct kmem_cache *kernfs_node_cache __ro_after_init;
struct kmem_cache *kernfs_iattrs_cache __ro_after_init;
struct kernfs_global_locks *kernfs_locks __ro_after_init;

static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry)
{
	struct kernfs_root *root = kernfs_root(kernfs_dentry_node(dentry));
	struct kernfs_syscall_ops *scops = root->syscall_ops;

	if (scops && scops->show_options)
		return scops->show_options(sf, root);
	return 0;
}

static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
{
	struct kernfs_node *node = kernfs_dentry_node(dentry);
	struct kernfs_root *root = kernfs_root(node);
	struct kernfs_syscall_ops *scops = root->syscall_ops;

	if (scops && scops->show_path)
		return scops->show_path(sf, node, root);

	seq_dentry(sf, dentry, " \t\n\\");
	return 0;
}

static int kernfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	simple_statfs(dentry, buf);
	buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
	return 0;
}

const struct super_operations kernfs_sops = {
	.statfs		= kernfs_statfs,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= kernfs_evict_inode,

	.show_options	= kernfs_sop_show_options,
	.show_path	= kernfs_sop_show_path,
};

static int
kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len, struct inode *parent) { struct kernfs_node *kn = inode->i_private; if (*max_len < 2) { *max_len = 2; return FILEID_INVALID; } *max_len = 2; *(u64 *)fh = kn->id; return FILEID_KERNFS; } static struct dentry *__kernfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, bool get_parent) { struct kernfs_super_info *info = kernfs_info(sb); struct kernfs_node *kn; struct inode *inode; u64 id; if (fh_len < 2) return NULL; switch (fh_type) { case FILEID_KERNFS: id = *(u64 *)fid; break; case FILEID_INO32_GEN: case FILEID_INO32_GEN_PARENT: /* * blk_log_action() exposes "LOW32,HIGH32" pair without * type and userland can call us with generic fid * constructed from them. Combine it back to ID. See * blk_log_action(). */ id = ((u64)fid->i32.gen << 32) | fid->i32.ino; break; default: return NULL; } kn = kernfs_find_and_get_node_by_id(info->root, id); if (!kn) return ERR_PTR(-ESTALE); if (get_parent) { struct kernfs_node *parent; parent = kernfs_get_parent(kn); kernfs_put(kn); kn = parent; if (!kn) return ERR_PTR(-ESTALE); } inode = kernfs_get_inode(sb, kn); kernfs_put(kn); return d_obtain_alias(inode); } static struct dentry *kernfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, false); } static struct dentry *kernfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, true); } static struct dentry *kernfs_get_parent_dentry(struct dentry *child) { struct kernfs_node *kn = kernfs_dentry_node(child); return d_obtain_alias(kernfs_get_inode(child->d_sb, kn->parent)); } static const struct export_operations kernfs_export_ops = { .encode_fh = kernfs_encode_fh, .fh_to_dentry = kernfs_fh_to_dentry, .fh_to_parent = kernfs_fh_to_parent, .get_parent = kernfs_get_parent_dentry, }; /** * kernfs_root_from_sb - determine kernfs_root associated with a super_block * @sb: the super_block in question * * Return: the kernfs_root associated with @sb. If @sb is not a kernfs one, * %NULL is returned. */ struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) { if (sb->s_op == &kernfs_sops) return kernfs_info(sb)->root; return NULL; } /* * find the next ancestor in the path down to @child, where @parent was the * ancestor whose descendant we want to find. * * Say the path is /a/b/c/d. @child is d, @parent is %NULL. We return the root * node. If @parent is b, then we return the node for c. * Passing in d as @parent is not ok. 
*/ static struct kernfs_node *find_next_ancestor(struct kernfs_node *child, struct kernfs_node *parent) { if (child == parent) { pr_crit_once("BUG in find_next_ancestor: called with parent == child"); return NULL; } while (child->parent != parent) { if (!child->parent) return NULL; child = child->parent; } return child; } /** * kernfs_node_dentry - get a dentry for the given kernfs_node * @kn: kernfs_node for which a dentry is needed * @sb: the kernfs super_block * * Return: the dentry pointer */ struct dentry *kernfs_node_dentry(struct kernfs_node *kn, struct super_block *sb) { struct dentry *dentry; struct kernfs_node *knparent; BUG_ON(sb->s_op != &kernfs_sops); dentry = dget(sb->s_root); /* Check if this is the root kernfs_node */ if (!kn->parent) return dentry; knparent = find_next_ancestor(kn, NULL); if (WARN_ON(!knparent)) { dput(dentry); return ERR_PTR(-EINVAL); } do { struct dentry *dtmp; struct kernfs_node *kntmp; if (kn == knparent) return dentry; kntmp = find_next_ancestor(kn, knparent); if (WARN_ON(!kntmp)) { dput(dentry); return ERR_PTR(-EINVAL); } dtmp = lookup_positive_unlocked(kntmp->name, dentry, strlen(kntmp->name)); dput(dentry); if (IS_ERR(dtmp)) return dtmp; knparent = kntmp; dentry = dtmp; } while (true); } static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *kfc) { struct kernfs_super_info *info = kernfs_info(sb); struct kernfs_root *kf_root = kfc->root; struct inode *inode; struct dentry *root; info->sb = sb; /* Userspace would break if executables or devices appear on sysfs */ sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = kfc->magic; sb->s_op = &kernfs_sops; sb->s_xattr = kernfs_xattr_handlers; if (info->root->flags & KERNFS_ROOT_SUPPORT_EXPORTOP) sb->s_export_op = &kernfs_export_ops; sb->s_time_gran = 1; /* sysfs dentries and inodes don't require IO to create */ sb->s_shrink->seeks = 0; /* get root inode, initialize and unlock it */ down_read(&kf_root->kernfs_rwsem); inode = kernfs_get_inode(sb, info->root->kn); up_read(&kf_root->kernfs_rwsem); if (!inode) { pr_debug("kernfs: could not get root inode\n"); return -ENOMEM; } /* instantiate and link root dentry */ root = d_make_root(inode); if (!root) { pr_debug("%s: could not get root dentry!\n", __func__); return -ENOMEM; } sb->s_root = root; sb->s_d_op = &kernfs_dops; return 0; } static int kernfs_test_super(struct super_block *sb, struct fs_context *fc) { struct kernfs_super_info *sb_info = kernfs_info(sb); struct kernfs_super_info *info = fc->s_fs_info; return sb_info->root == info->root && sb_info->ns == info->ns; } static int kernfs_set_super(struct super_block *sb, struct fs_context *fc) { struct kernfs_fs_context *kfc = fc->fs_private; kfc->ns_tag = NULL; return set_anon_super_fc(sb, fc); } /** * kernfs_super_ns - determine the namespace tag of a kernfs super_block * @sb: super_block of interest * * Return: the namespace tag associated with kernfs super_block @sb. */ const void *kernfs_super_ns(struct super_block *sb) { struct kernfs_super_info *info = kernfs_info(sb); return info->ns; } /** * kernfs_get_tree - kernfs filesystem access/retrieval helper * @fc: The filesystem context. * * This is to be called from each kernfs user's fs_context->ops->get_tree() * implementation, which should set the specified ->@fs_type and ->@flags, and * specify the hierarchy and namespace tag to mount via ->@root and ->@ns, * respectively. * * Return: %0 on success, -errno on failure. 
*/ int kernfs_get_tree(struct fs_context *fc) { struct kernfs_fs_context *kfc = fc->fs_private; struct super_block *sb; struct kernfs_super_info *info; int error; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->root = kfc->root; info->ns = kfc->ns_tag; INIT_LIST_HEAD(&info->node); fc->s_fs_info = info; sb = sget_fc(fc, kernfs_test_super, kernfs_set_super); if (IS_ERR(sb)) return PTR_ERR(sb); if (!sb->s_root) { struct kernfs_super_info *info = kernfs_info(sb); struct kernfs_root *root = kfc->root; kfc->new_sb_created = true; error = kernfs_fill_super(sb, kfc); if (error) { deactivate_locked_super(sb); return error; } sb->s_flags |= SB_ACTIVE; uuid_t uuid; uuid_gen(&uuid); super_set_uuid(sb, uuid.b, sizeof(uuid)); down_write(&root->kernfs_supers_rwsem); list_add(&info->node, &info->root->supers); up_write(&root->kernfs_supers_rwsem); } fc->root = dget(sb->s_root); return 0; } void kernfs_free_fs_context(struct fs_context *fc) { /* Note that we don't deal with kfc->ns_tag here. */ kfree(fc->s_fs_info); fc->s_fs_info = NULL; } /** * kernfs_kill_sb - kill_sb for kernfs * @sb: super_block being killed * * This can be used directly for file_system_type->kill_sb(). If a kernfs * user needs extra cleanup, it can implement its own kill_sb() and call * this function at the end. */ void kernfs_kill_sb(struct super_block *sb) { struct kernfs_super_info *info = kernfs_info(sb); struct kernfs_root *root = info->root; down_write(&root->kernfs_supers_rwsem); list_del(&info->node); up_write(&root->kernfs_supers_rwsem); /* * Remove the superblock from fs_supers/s_instances * so we can't find it, before freeing kernfs_super_info. */ kill_anon_super(sb); kfree(info); } static void __init kernfs_mutex_init(void) { int count; for (count = 0; count < NR_KERNFS_LOCKS; count++) mutex_init(&kernfs_locks->open_file_mutex[count]); } static void __init kernfs_lock_init(void) { kernfs_locks = kmalloc(sizeof(struct kernfs_global_locks), GFP_KERNEL); WARN_ON(!kernfs_locks); kernfs_mutex_init(); } void __init kernfs_init(void) { kernfs_node_cache = kmem_cache_create("kernfs_node_cache", sizeof(struct kernfs_node), 0, SLAB_PANIC, NULL); /* Creates slab cache for kernfs inode attributes */ kernfs_iattrs_cache = kmem_cache_create("kernfs_iattrs_cache", sizeof(struct kernfs_iattrs), 0, SLAB_PANIC, NULL); kernfs_lock_init(); }
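/*
 * Hedged sketch, not part of mount.c above: how a kernfs consumer is
 * typically expected to wire kernfs_get_tree(), kernfs_free_fs_context()
 * and kernfs_kill_sb() into its file_system_type, loosely modelled on
 * sysfs. The names "examplefs", examplefs_* and example_root are
 * illustrative assumptions, as is reusing SYSFS_MAGIC.
 */
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/kernfs.h>
#include <linux/magic.h>
#include <linux/slab.h>

static struct kernfs_root *example_root;	/* set up elsewhere via kernfs_create_root() */

static int examplefs_get_tree(struct fs_context *fc)
{
	return kernfs_get_tree(fc);
}

static void examplefs_fs_context_free(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	kernfs_free_fs_context(fc);
	kfree(kfc);
}

static const struct fs_context_operations examplefs_fs_context_ops = {
	.get_tree	= examplefs_get_tree,
	.free		= examplefs_fs_context_free,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc;

	kfc = kzalloc(sizeof(*kfc), GFP_KERNEL);
	if (!kfc)
		return -ENOMEM;

	kfc->root = example_root;	/* the kernfs hierarchy to mount */
	kfc->magic = SYSFS_MAGIC;	/* any fs magic; reused here for brevity */
	fc->fs_private = kfc;
	fc->ops = &examplefs_fs_context_ops;
	return 0;
}

static struct file_system_type examplefs_fs_type = {
	.name			= "examplefs",
	.init_fs_context	= examplefs_init_fs_context,
	.kill_sb		= kernfs_kill_sb,	/* usable directly, per the comment above */
};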
// SPDX-License-Identifier: GPL-2.0+
/*
 * MCT (Magic Control Technology Corp.) USB RS232 Converter Driver
 *
 * Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch)
 *
 * This program is largely derived from the Belkin USB Serial Adapter Driver
 * (see belkin_sa.[ch]). All of the information about the device was acquired
 * by using SniffUSB on Windows98. For technical details see mct_u232.h.
 *
 * William G. Greathouse and Greg Kroah-Hartman provided great help on how to
 * do the reverse engineering and how to write a USB serial device driver.
* * TO BE DONE, TO BE CHECKED: * DTR/RTS signal handling may be incomplete or incorrect. I have mainly * implemented what I have seen with SniffUSB or found in belkin_sa.c. * For further TODOs check also belkin_sa.c. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/unaligned.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include "mct_u232.h" #define DRIVER_AUTHOR "Wolfgang Grandegger <wolfgang@ces.ch>" #define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver" /* * Function prototypes */ static int mct_u232_port_probe(struct usb_serial_port *port); static void mct_u232_port_remove(struct usb_serial_port *remove); static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port); static void mct_u232_close(struct usb_serial_port *port); static void mct_u232_dtr_rts(struct usb_serial_port *port, int on); static void mct_u232_read_int_callback(struct urb *urb); static void mct_u232_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); static int mct_u232_break_ctl(struct tty_struct *tty, int break_state); static int mct_u232_tiocmget(struct tty_struct *tty); static int mct_u232_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static void mct_u232_throttle(struct tty_struct *tty); static void mct_u232_unthrottle(struct tty_struct *tty); /* * All of the device info needed for the MCT USB-RS232 converter. */ static const struct usb_device_id id_table[] = { { USB_DEVICE(MCT_U232_VID, MCT_U232_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) }, { USB_DEVICE(MCT_U232_BELKIN_F5U109_VID, MCT_U232_BELKIN_F5U109_PID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver mct_u232_device = { .driver = { .name = "mct_u232", }, .description = "MCT U232", .id_table = id_table, .num_ports = 1, .open = mct_u232_open, .close = mct_u232_close, .dtr_rts = mct_u232_dtr_rts, .throttle = mct_u232_throttle, .unthrottle = mct_u232_unthrottle, .read_int_callback = mct_u232_read_int_callback, .set_termios = mct_u232_set_termios, .break_ctl = mct_u232_break_ctl, .tiocmget = mct_u232_tiocmget, .tiocmset = mct_u232_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .port_probe = mct_u232_port_probe, .port_remove = mct_u232_port_remove, .get_icount = usb_serial_generic_get_icount, }; static struct usb_serial_driver * const serial_drivers[] = { &mct_u232_device, NULL }; struct mct_u232_private { struct urb *read_urb; spinlock_t lock; unsigned int control_state; /* Modem Line Setting (TIOCM) */ unsigned char last_lcr; /* Line Control Register */ unsigned char last_lsr; /* Line Status Register */ unsigned char last_msr; /* Modem Status Register */ unsigned int rx_flags; /* Throttling flags */ }; #define THROTTLED 0x01 /* * Handle vendor specific USB requests */ #define WDR_TIMEOUT 5000 /* default urb timeout */ /* * Later day 2.6.0-test kernels have new baud rates like B230400 which * we do not know how to support. We ignore them for the moment. 
*/ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, speed_t value, speed_t *result) { *result = value; if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID || le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_BELKIN_F5U109_PID) { switch (value) { case 300: return 0x01; case 600: return 0x02; /* this one not tested */ case 1200: return 0x03; case 2400: return 0x04; case 4800: return 0x06; case 9600: return 0x08; case 19200: return 0x09; case 38400: return 0x0a; case 57600: return 0x0b; case 115200: return 0x0c; default: *result = 9600; return 0x08; } } else { /* FIXME: Can we use any divider - should we do divider = 115200/value; real baud = 115200/divider */ switch (value) { case 300: break; case 600: break; case 1200: break; case 2400: break; case 4800: break; case 9600: break; case 19200: break; case 38400: break; case 57600: break; case 115200: break; default: value = 9600; *result = 9600; } return 115200/value; } } static int mct_u232_set_baud_rate(struct tty_struct *tty, struct usb_serial *serial, struct usb_serial_port *port, speed_t value) { unsigned int divisor; int rc; unsigned char *buf; unsigned char cts_enable_byte = 0; speed_t speed; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; divisor = mct_u232_calculate_baud_rate(serial, value, &speed); put_unaligned_le32(divisor, buf); rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_BAUD_RATE_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE, WDR_TIMEOUT); if (rc < 0) /*FIXME: What value speed results */ dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n", value, rc); else tty_encode_baud_rate(tty, speed, speed); dev_dbg(&port->dev, "set_baud_rate: value: 0x%x, divisor: 0x%x\n", value, divisor); /* Mimic the MCT-supplied Windows driver (version 1.21P.0104), which always sends two extra USB 'device request' messages after the 'baud rate change' message. The actual functionality of the request codes in these messages is not fully understood but these particular codes are never seen in any operation besides a baud rate change. Both of these messages send a single byte of data. In the first message, the value of this byte is always zero. The second message has been determined experimentally to control whether data will be transmitted to a device which is not asserting the 'CTS' signal. If the second message's data byte is zero, data will be transmitted even if 'CTS' is not asserted (i.e. no hardware flow control). if the second message's data byte is nonzero (a value of 1 is used by this driver), data will not be transmitted to a device which is not asserting 'CTS'. 
*/ buf[0] = 0; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_UNKNOWN1_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_UNKNOWN1_REQUEST, rc); if (port && C_CRTSCTS(tty)) cts_enable_byte = 1; dev_dbg(&port->dev, "set_baud_rate: send second control message, data = %02X\n", cts_enable_byte); buf[0] = cts_enable_byte; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_CTS_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_CTS_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc); kfree(buf); return rc; } /* mct_u232_set_baud_rate */ static int mct_u232_set_line_ctrl(struct usb_serial_port *port, unsigned char lcr) { int rc; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; buf[0] = lcr; rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), MCT_U232_SET_LINE_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc); dev_dbg(&port->dev, "set_line_ctrl: 0x%x\n", lcr); kfree(buf); return rc; } /* mct_u232_set_line_ctrl */ static int mct_u232_set_modem_ctrl(struct usb_serial_port *port, unsigned int control_state) { int rc; unsigned char mcr; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) return -ENOMEM; mcr = MCT_U232_MCR_NONE; if (control_state & TIOCM_DTR) mcr |= MCT_U232_MCR_DTR; if (control_state & TIOCM_RTS) mcr |= MCT_U232_MCR_RTS; buf[0] = mcr; rc = usb_control_msg(port->serial->dev, usb_sndctrlpipe(port->serial->dev, 0), MCT_U232_SET_MODEM_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, WDR_TIMEOUT); kfree(buf); dev_dbg(&port->dev, "set_modem_ctrl: state=0x%x ==> mcr=0x%x\n", control_state, mcr); if (rc < 0) { dev_err(&port->dev, "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); return rc; } return 0; } /* mct_u232_set_modem_ctrl */ static int mct_u232_get_modem_stat(struct usb_serial_port *port, unsigned char *msr) { int rc; unsigned char *buf; buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); if (buf == NULL) { *msr = 0; return -ENOMEM; } rc = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), MCT_U232_GET_MODEM_STAT_REQUEST, MCT_U232_GET_REQUEST_TYPE, 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE, WDR_TIMEOUT); if (rc < MCT_U232_GET_MODEM_STAT_SIZE) { dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc); if (rc >= 0) rc = -EIO; *msr = 0; } else { *msr = buf[0]; } dev_dbg(&port->dev, "get_modem_stat: 0x%x\n", *msr); kfree(buf); return rc; } /* mct_u232_get_modem_stat */ static void mct_u232_msr_to_icount(struct async_icount *icount, unsigned char msr) { /* Translate Control Line states */ if (msr & MCT_U232_MSR_DDSR) icount->dsr++; if (msr & MCT_U232_MSR_DCTS) icount->cts++; if (msr & MCT_U232_MSR_DRI) icount->rng++; if (msr & MCT_U232_MSR_DCD) icount->dcd++; } /* mct_u232_msr_to_icount */ static void mct_u232_msr_to_state(struct usb_serial_port *port, unsigned int *control_state, unsigned char msr) { /* Translate Control Line states */ if (msr & MCT_U232_MSR_DSR) *control_state |= TIOCM_DSR; else *control_state &= ~TIOCM_DSR; if (msr & MCT_U232_MSR_CTS) *control_state |= TIOCM_CTS; else 
*control_state &= ~TIOCM_CTS; if (msr & MCT_U232_MSR_RI) *control_state |= TIOCM_RI; else *control_state &= ~TIOCM_RI; if (msr & MCT_U232_MSR_CD) *control_state |= TIOCM_CD; else *control_state &= ~TIOCM_CD; dev_dbg(&port->dev, "msr_to_state: msr=0x%x ==> state=0x%x\n", msr, *control_state); } /* mct_u232_msr_to_state */ /* * Driver's tty interface functions */ static int mct_u232_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv; /* check first to simplify error handling */ if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) { dev_err(&port->dev, "expected endpoint missing\n"); return -ENODEV; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* Use second interrupt-in endpoint for reading. */ priv->read_urb = serial->port[1]->interrupt_in_urb; priv->read_urb->context = port; spin_lock_init(&priv->lock); usb_set_serial_port_data(port, priv); return 0; } static void mct_u232_port_remove(struct usb_serial_port *port) { struct mct_u232_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); } static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv = usb_get_serial_port_data(port); int retval = 0; unsigned int control_state; unsigned long flags; unsigned char last_lcr; unsigned char last_msr; /* Compensate for a hardware bug: although the Sitecom U232-P25 * device reports a maximum output packet size of 32 bytes, * it seems to be able to accept only 16 bytes (and that's what * SniffUSB says too...) */ if (le16_to_cpu(serial->dev->descriptor.idProduct) == MCT_U232_SITECOM_PID) port->bulk_out_size = 16; /* Do a defined restart: the normal serial device seems to * always turn on DTR and RTS here, so do the same. I'm not * sure if this is really necessary. But it should not harm * either. 
*/ spin_lock_irqsave(&priv->lock, flags); if (tty && C_BAUD(tty)) priv->control_state = TIOCM_DTR | TIOCM_RTS; else priv->control_state = 0; priv->last_lcr = (MCT_U232_DATA_BITS_8 | MCT_U232_PARITY_NONE | MCT_U232_STOP_BITS_1); control_state = priv->control_state; last_lcr = priv->last_lcr; spin_unlock_irqrestore(&priv->lock, flags); mct_u232_set_modem_ctrl(port, control_state); mct_u232_set_line_ctrl(port, last_lcr); /* Read modem status and update control state */ mct_u232_get_modem_stat(port, &last_msr); spin_lock_irqsave(&priv->lock, flags); priv->last_msr = last_msr; mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr); spin_unlock_irqrestore(&priv->lock, flags); retval = usb_submit_urb(priv->read_urb, GFP_KERNEL); if (retval) { dev_err(&port->dev, "usb_submit_urb(read) failed pipe 0x%x err %d\n", port->read_urb->pipe, retval); goto error; } retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (retval) { usb_kill_urb(priv->read_urb); dev_err(&port->dev, "usb_submit_urb(read int) failed pipe 0x%x err %d", port->interrupt_in_urb->pipe, retval); goto error; } return 0; error: return retval; } /* mct_u232_open */ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on) { unsigned int control_state; struct mct_u232_private *priv = usb_get_serial_port_data(port); spin_lock_irq(&priv->lock); if (on) priv->control_state |= TIOCM_DTR | TIOCM_RTS; else priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS); control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } static void mct_u232_close(struct usb_serial_port *port) { struct mct_u232_private *priv = usb_get_serial_port_data(port); usb_kill_urb(priv->read_urb); usb_kill_urb(port->interrupt_in_urb); usb_serial_generic_close(port); } /* mct_u232_close */ static void mct_u232_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned char *data = urb->transfer_buffer; int retval; int status = urb->status; unsigned long flags; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); /* * Work-a-round: handle the 'usual' bulk-in pipe here */ if (urb->transfer_buffer_length > 2) { if (urb->actual_length) { tty_insert_flip_string(&port->port, data, urb->actual_length); tty_flip_buffer_push(&port->port); } goto exit; } /* * The interrupt-in pipe signals exceptional conditions (modem line * signal changes and errors). data[0] holds MSR, data[1] holds LSR. */ spin_lock_irqsave(&priv->lock, flags); priv->last_msr = data[MCT_U232_MSR_INDEX]; /* Record Control Line states */ mct_u232_msr_to_state(port, &priv->control_state, priv->last_msr); mct_u232_msr_to_icount(&port->icount, priv->last_msr); #if 0 /* Not yet handled. See belkin_sa.c for further information */ /* Now to report any errors */ priv->last_lsr = data[MCT_U232_LSR_INDEX]; /* * fill in the flip buffer here, but I do not know the relation * to the current/next receive buffer or characters. I need * to look in to this before committing any code. 
*/ if (priv->last_lsr & MCT_U232_LSR_ERR) { tty = tty_port_tty_get(&port->port); /* Overrun Error */ if (priv->last_lsr & MCT_U232_LSR_OE) { } /* Parity Error */ if (priv->last_lsr & MCT_U232_LSR_PE) { } /* Framing Error */ if (priv->last_lsr & MCT_U232_LSR_FE) { } /* Break Indicator */ if (priv->last_lsr & MCT_U232_LSR_BI) { } tty_kref_put(tty); } #endif wake_up_interruptible(&port->port.delta_msr_wait); spin_unlock_irqrestore(&priv->lock, flags); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&port->dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } /* mct_u232_read_int_callback */ static void mct_u232_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_serial *serial = port->serial; struct mct_u232_private *priv = usb_get_serial_port_data(port); struct ktermios *termios = &tty->termios; unsigned int cflag = termios->c_cflag; unsigned int old_cflag = old_termios->c_cflag; unsigned long flags; unsigned int control_state; unsigned char last_lcr; /* get a local copy of the current port settings */ spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; spin_unlock_irqrestore(&priv->lock, flags); last_lcr = 0; /* * Update baud rate. * Do not attempt to cache old rates and skip settings, * disconnects screw such tricks up completely. * Premature optimization is the root of all evil. */ /* reassert DTR and RTS on transition from B0 */ if ((old_cflag & CBAUD) == B0) { dev_dbg(&port->dev, "%s: baud was B0\n", __func__); control_state |= TIOCM_DTR | TIOCM_RTS; mct_u232_set_modem_ctrl(port, control_state); } mct_u232_set_baud_rate(tty, serial, port, tty_get_baud_rate(tty)); if ((cflag & CBAUD) == B0) { dev_dbg(&port->dev, "%s: baud is B0\n", __func__); /* Drop RTS and DTR */ control_state &= ~(TIOCM_DTR | TIOCM_RTS); mct_u232_set_modem_ctrl(port, control_state); } /* * Update line control register (LCR) */ /* set the parity */ if (cflag & PARENB) last_lcr |= (cflag & PARODD) ? MCT_U232_PARITY_ODD : MCT_U232_PARITY_EVEN; else last_lcr |= MCT_U232_PARITY_NONE; /* set the number of data bits */ switch (cflag & CSIZE) { case CS5: last_lcr |= MCT_U232_DATA_BITS_5; break; case CS6: last_lcr |= MCT_U232_DATA_BITS_6; break; case CS7: last_lcr |= MCT_U232_DATA_BITS_7; break; case CS8: last_lcr |= MCT_U232_DATA_BITS_8; break; default: dev_err(&port->dev, "CSIZE was not CS5-CS8, using default of 8\n"); last_lcr |= MCT_U232_DATA_BITS_8; break; } termios->c_cflag &= ~CMSPAR; /* set the number of stop bits */ last_lcr |= (cflag & CSTOPB) ? 
MCT_U232_STOP_BITS_2 : MCT_U232_STOP_BITS_1; mct_u232_set_line_ctrl(port, last_lcr); /* save off the modified port settings */ spin_lock_irqsave(&priv->lock, flags); priv->control_state = control_state; priv->last_lcr = last_lcr; spin_unlock_irqrestore(&priv->lock, flags); } /* mct_u232_set_termios */ static int mct_u232_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned char lcr; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); lcr = priv->last_lcr; if (break_state) lcr |= MCT_U232_SET_BREAK; spin_unlock_irqrestore(&priv->lock, flags); return mct_u232_set_line_ctrl(port, lcr); } /* mct_u232_break_ctl */ static int mct_u232_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; spin_unlock_irqrestore(&priv->lock, flags); return control_state; } static int mct_u232_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; if (set & TIOCM_RTS) control_state |= TIOCM_RTS; if (set & TIOCM_DTR) control_state |= TIOCM_DTR; if (clear & TIOCM_RTS) control_state &= ~TIOCM_RTS; if (clear & TIOCM_DTR) control_state &= ~TIOCM_DTR; priv->control_state = control_state; spin_unlock_irqrestore(&priv->lock, flags); return mct_u232_set_modem_ctrl(port, control_state); } static void mct_u232_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; spin_lock_irq(&priv->lock); priv->rx_flags |= THROTTLED; if (C_CRTSCTS(tty)) { priv->control_state &= ~TIOCM_RTS; control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } else { spin_unlock_irq(&priv->lock); } } static void mct_u232_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct mct_u232_private *priv = usb_get_serial_port_data(port); unsigned int control_state; spin_lock_irq(&priv->lock); if ((priv->rx_flags & THROTTLED) && C_CRTSCTS(tty)) { priv->rx_flags &= ~THROTTLED; priv->control_state |= TIOCM_RTS; control_state = priv->control_state; spin_unlock_irq(&priv->lock); mct_u232_set_modem_ctrl(port, control_state); } else { spin_unlock_irq(&priv->lock); } } module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
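/*
 * Illustrative user-space sketch (not part of the driver above): the
 * tiocmget()/tiocmset() handlers are reached through the standard
 * TIOCMGET/TIOCMSET ioctls on the tty device.  The device node path is an
 * assumption for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);	/* assumed node */
	int bits = 0;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read the cached modem-control state (driver's tiocmget). */
	if (ioctl(fd, TIOCMGET, &bits) == 0)
		printf("DTR=%d RTS=%d CTS=%d CD=%d\n",
		       !!(bits & TIOCM_DTR), !!(bits & TIOCM_RTS),
		       !!(bits & TIOCM_CTS), !!(bits & TIOCM_CD));

	/* Assert DTR and RTS (driver's tiocmset -> mct_u232_set_modem_ctrl). */
	bits |= TIOCM_DTR | TIOCM_RTS;
	ioctl(fd, TIOCMSET, &bits);

	close(fd);
	return 0;
}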
// SPDX-License-Identifier: GPL-2.0-only /* * net/dccp/ccid.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * CCID infrastructure */ #include <linux/slab.h> #include "ccid.h" #include "ccids/lib/tfrc.h" static struct ccid_operations *ccids[] = { &ccid2_ops, #ifdef CONFIG_IP_DCCP_CCID3 &ccid3_ops, #endif }; static struct ccid_operations *ccid_by_number(const u8 id) { int i; for (i = 0; i < ARRAY_SIZE(ccids); i++) if (ccids[i]->ccid_id == id) return ccids[i]; return NULL; } /* check that up to @array_len members in @ccid_array are supported */ bool ccid_support_check(u8 const *ccid_array, u8 array_len) { while (array_len > 0) if (ccid_by_number(ccid_array[--array_len]) == NULL) return false; return true; } /** * ccid_get_builtin_ccids - Populate a list of built-in CCIDs * @ccid_array: pointer to copy into * @array_len: value to return length into * * This function allocates memory - caller must see that it is freed after use. */ int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len) { *ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any()); if (*ccid_array == NULL) return -ENOBUFS; for (*array_len = 0; *array_len < ARRAY_SIZE(ccids); *array_len += 1) (*ccid_array)[*array_len] = ccids[*array_len]->ccid_id; return 0; } int ccid_getsockopt_builtin_ccids(struct sock *sk, int len, char __user *optval, int __user *optlen) { u8 *ccid_array, array_len; int err = 0; if (ccid_get_builtin_ccids(&ccid_array, &array_len)) return -ENOBUFS; if (put_user(array_len, optlen)) err = -EFAULT; else if (len > 0 && copy_to_user(optval, ccid_array, len > array_len ? array_len : len)) err = -EFAULT; kfree(ccid_array); return err; } static __printf(3, 4) struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
{ struct kmem_cache *slab; va_list args; va_start(args, fmt); vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args); va_end(args); slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, SLAB_HWCACHE_ALIGN, NULL); return slab; } static void ccid_kmem_cache_destroy(struct kmem_cache *slab) { kmem_cache_destroy(slab); } static int __init ccid_activate(struct ccid_operations *ccid_ops) { int err = -ENOBUFS; ccid_ops->ccid_hc_rx_slab = ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size, ccid_ops->ccid_hc_rx_slab_name, "ccid%u_hc_rx_sock", ccid_ops->ccid_id); if (ccid_ops->ccid_hc_rx_slab == NULL) goto out; ccid_ops->ccid_hc_tx_slab = ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size, ccid_ops->ccid_hc_tx_slab_name, "ccid%u_hc_tx_sock", ccid_ops->ccid_id); if (ccid_ops->ccid_hc_tx_slab == NULL) goto out_free_rx_slab; pr_info("DCCP: Activated CCID %d (%s)\n", ccid_ops->ccid_id, ccid_ops->ccid_name); err = 0; out: return err; out_free_rx_slab: ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); ccid_ops->ccid_hc_rx_slab = NULL; goto out; } static void ccid_deactivate(struct ccid_operations *ccid_ops) { ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab); ccid_ops->ccid_hc_tx_slab = NULL; ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab); ccid_ops->ccid_hc_rx_slab = NULL; pr_info("DCCP: Deactivated CCID %d (%s)\n", ccid_ops->ccid_id, ccid_ops->ccid_name); } struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx) { struct ccid_operations *ccid_ops = ccid_by_number(id); struct ccid *ccid = NULL; if (ccid_ops == NULL) goto out; ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab : ccid_ops->ccid_hc_tx_slab, gfp_any()); if (ccid == NULL) goto out; ccid->ccid_ops = ccid_ops; if (rx) { memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size); if (ccid->ccid_ops->ccid_hc_rx_init != NULL && ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0) goto out_free_ccid; } else { memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size); if (ccid->ccid_ops->ccid_hc_tx_init != NULL && ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0) goto out_free_ccid; } out: return ccid; out_free_ccid: kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab : ccid_ops->ccid_hc_tx_slab, ccid); ccid = NULL; goto out; } void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk) { if (ccid != NULL) { if (ccid->ccid_ops->ccid_hc_rx_exit != NULL) ccid->ccid_ops->ccid_hc_rx_exit(sk); kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid); } } void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk) { if (ccid != NULL) { if (ccid->ccid_ops->ccid_hc_tx_exit != NULL) ccid->ccid_ops->ccid_hc_tx_exit(sk); kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid); } } int __init ccid_initialize_builtins(void) { int i, err = tfrc_lib_init(); if (err) return err; for (i = 0; i < ARRAY_SIZE(ccids); i++) { err = ccid_activate(ccids[i]); if (err) goto unwind_registrations; } return 0; unwind_registrations: while(--i >= 0) ccid_deactivate(ccids[i]); tfrc_lib_exit(); return err; } void ccid_cleanup_builtins(void) { int i; for (i = 0; i < ARRAY_SIZE(ccids); i++) ccid_deactivate(ccids[i]); tfrc_lib_exit(); }
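/*
 * Illustrative sketch (not taken from the DCCP core; the function name and
 * error handling are assumptions): pairing ccid_new() with
 * ccid_hc_tx_delete(), and freeing the array that ccid_get_builtin_ccids()
 * allocates, as its kernel-doc above requires.
 */
#include <linux/dccp.h>		/* DCCPC_CCID2 */
#include <linux/slab.h>
#include "ccid.h"

static int example_attach_tx_ccid(struct sock *sk)
{
	struct ccid *tx_ccid;
	u8 *ids, len;

	/* Query the built-in CCIDs; the array is kmalloc()ed for the caller. */
	if (ccid_get_builtin_ccids(&ids, &len) == 0) {
		pr_debug("%d built-in CCIDs available\n", len);
		kfree(ids);			/* caller owns the allocation */
	}

	/* CCID-2 (TCP-like congestion control) is always built in. */
	tx_ccid = ccid_new(DCCPC_CCID2, sk, /* rx = */ false);
	if (tx_ccid == NULL)
		return -ENOMEM;

	/* ... drive the TX half-connection through the ccid_hc_tx_*() hooks ... */

	ccid_hc_tx_delete(tx_ccid, sk);
	return 0;
}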
/* Block- or MTD-based romfs * * Copyright © 2007 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) * * Derived from: ROMFS file system, Linux implementation * * Copyright © 1997-1999 Janos Farkas <chexum@shadow.banki.hu> * * Using parts of the minix filesystem * Copyright © 1991, 1992 Linus Torvalds * * and parts of the affs filesystem additionally * Copyright © 1993 Ray Burr * Copyright © 1996 Hans-Joachim Widmaier * * Changes * Changed for 2.1.19 modules * Jan 1997 Initial release * Jun 1997 2.1.43+ changes * Proper page locking in read_folio * Changed to work with 2.1.45+ fs * Jul 1997 Fixed follow_link * 2.1.47 * lookup shouldn't return -ENOENT * from Horst von Brand: * fail on wrong checksum * double unlock_super was possible * correct namelen for statfs * spotted by Bill Hawes: * readlink shouldn't iput() * Jun 1998 2.1.106 from Avery Pennarun: glibc scandir() * exposed a problem in readdir * 2.1.107 code-freeze spellchecker run * Aug 1998 2.1.118+ VFS changes * Sep 1998 2.1.122 another VFS change (follow_link) * Apr 1999 2.2.7 no more EBADF checking in * lookup/readdir, use ERR_PTR * Jun 1999 2.3.6 d_alloc_root use changed * 2.3.9 clean up usage of ENOENT/negative * dentries in lookup * clean up page flags setting * (error, uptodate, locking) in * in read_folio * use init_special_inode for * fifos/sockets (and streamline) in * read_inode, fix _ops table order * Aug 1999 2.3.16 __initfunc() => __init change * Oct 1999 2.3.24 page->owner hack obsoleted * Nov 1999 2.3.27 2.3.25+ page->offset => index change * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/fs_context.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/statfs.h> #include <linux/mtd/super.h> #include <linux/ctype.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/uaccess.h> #include <linux/major.h> #include "internal.h" static struct kmem_cache *romfs_inode_cachep; static const umode_t romfs_modemap[8] = { 0, /* hard link */ S_IFDIR | 0644, /* directory */ S_IFREG | 0644, /* regular file */ S_IFLNK | 0777, /* symlink */ S_IFBLK | 0600, /* blockdev */ S_IFCHR | 0600, /* chardev */ S_IFSOCK | 0644, /* socket */ S_IFIFO | 0644 /* FIFO */ }; static const unsigned char romfs_dtype_table[] = { DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_SOCK, DT_FIFO }; static struct inode *romfs_iget(struct super_block *sb, unsigned long pos); /* * read a page worth of data from the image */ static int romfs_read_folio(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; loff_t offset, size; unsigned long fillsize, pos; void *buf; int ret; buf = kmap_local_folio(folio, 0); offset = folio_pos(folio); size = i_size_read(inode); fillsize = 0; ret = 0; if (offset < size) { size -= offset; fillsize = size > PAGE_SIZE ? 
PAGE_SIZE : size; pos = ROMFS_I(inode)->i_dataoffset + offset; ret = romfs_dev_read(inode->i_sb, pos, buf, fillsize); if (ret < 0) { fillsize = 0; ret = -EIO; } } buf = folio_zero_tail(folio, fillsize, buf + fillsize); kunmap_local(buf); folio_end_read(folio, ret == 0); return ret; } static const struct address_space_operations romfs_aops = { .read_folio = romfs_read_folio }; /* * read the entries from a directory */ static int romfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *i = file_inode(file); struct romfs_inode ri; unsigned long offset, maxoff; int j, ino, nextfh; char fsname[ROMFS_MAXFN]; /* XXX dynamic? */ int ret; maxoff = romfs_maxsize(i->i_sb); offset = ctx->pos; if (!offset) { offset = i->i_ino & ROMFH_MASK; ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE); if (ret < 0) goto out; offset = be32_to_cpu(ri.spec) & ROMFH_MASK; } /* Not really failsafe, but we are read-only... */ for (;;) { if (!offset || offset >= maxoff) { offset = maxoff; ctx->pos = offset; goto out; } ctx->pos = offset; /* Fetch inode info */ ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE); if (ret < 0) goto out; j = romfs_dev_strnlen(i->i_sb, offset + ROMFH_SIZE, sizeof(fsname) - 1); if (j < 0) goto out; ret = romfs_dev_read(i->i_sb, offset + ROMFH_SIZE, fsname, j); if (ret < 0) goto out; fsname[j] = '\0'; ino = offset; nextfh = be32_to_cpu(ri.next); if ((nextfh & ROMFH_TYPE) == ROMFH_HRD) ino = be32_to_cpu(ri.spec); if (!dir_emit(ctx, fsname, j, ino, romfs_dtype_table[nextfh & ROMFH_TYPE])) goto out; offset = nextfh & ROMFH_MASK; } out: return 0; } /* * look up an entry in a directory */ static struct dentry *romfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { unsigned long offset, maxoff; struct inode *inode = NULL; struct romfs_inode ri; const char *name; /* got from dentry */ int len, ret; offset = dir->i_ino & ROMFH_MASK; ret = romfs_dev_read(dir->i_sb, offset, &ri, ROMFH_SIZE); if (ret < 0) goto error; /* search all the file entries in the list starting from the one * pointed to by the directory's special data */ maxoff = romfs_maxsize(dir->i_sb); offset = be32_to_cpu(ri.spec) & ROMFH_MASK; name = dentry->d_name.name; len = dentry->d_name.len; for (;;) { if (!offset || offset >= maxoff) break; ret = romfs_dev_read(dir->i_sb, offset, &ri, sizeof(ri)); if (ret < 0) goto error; /* try to match the first 16 bytes of name */ ret = romfs_dev_strcmp(dir->i_sb, offset + ROMFH_SIZE, name, len); if (ret < 0) goto error; if (ret == 1) { /* Hard link handling */ if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD) offset = be32_to_cpu(ri.spec) & ROMFH_MASK; inode = romfs_iget(dir->i_sb, offset); break; } /* next entry */ offset = be32_to_cpu(ri.next) & ROMFH_MASK; } return d_splice_alias(inode, dentry); error: return ERR_PTR(ret); } static const struct file_operations romfs_dir_operations = { .read = generic_read_dir, .iterate_shared = romfs_readdir, .llseek = generic_file_llseek, }; static const struct inode_operations romfs_dir_inode_operations = { .lookup = romfs_lookup, }; /* * get a romfs inode based on its position in the image (which doubles as the * inode number) */ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos) { struct romfs_inode_info *inode; struct romfs_inode ri; struct inode *i; unsigned long nlen; unsigned nextfh; int ret; umode_t mode; /* we might have to traverse a chain of "hard link" file entries to get * to the actual file */ for (;;) { ret = romfs_dev_read(sb, pos, &ri, sizeof(ri)); if (ret < 0) goto error; 
/* XXX: do romfs_checksum here too (with name) */ nextfh = be32_to_cpu(ri.next); if ((nextfh & ROMFH_TYPE) != ROMFH_HRD) break; pos = be32_to_cpu(ri.spec) & ROMFH_MASK; } /* determine the length of the filename */ nlen = romfs_dev_strnlen(sb, pos + ROMFH_SIZE, ROMFS_MAXFN); if (IS_ERR_VALUE(nlen)) goto eio; /* get an inode for this image position */ i = iget_locked(sb, pos); if (!i) return ERR_PTR(-ENOMEM); if (!(i->i_state & I_NEW)) return i; /* precalculate the data offset */ inode = ROMFS_I(i); inode->i_metasize = (ROMFH_SIZE + nlen + 1 + ROMFH_PAD) & ROMFH_MASK; inode->i_dataoffset = pos + inode->i_metasize; set_nlink(i, 1); /* Hard to decide.. */ i->i_size = be32_to_cpu(ri.size); inode_set_mtime_to_ts(i, inode_set_atime_to_ts(i, inode_set_ctime(i, 0, 0))); /* set up mode and ops */ mode = romfs_modemap[nextfh & ROMFH_TYPE]; switch (nextfh & ROMFH_TYPE) { case ROMFH_DIR: i->i_size = ROMFS_I(i)->i_metasize; i->i_op = &romfs_dir_inode_operations; i->i_fop = &romfs_dir_operations; if (nextfh & ROMFH_EXEC) mode |= S_IXUGO; break; case ROMFH_REG: i->i_fop = &romfs_ro_fops; i->i_data.a_ops = &romfs_aops; if (nextfh & ROMFH_EXEC) mode |= S_IXUGO; break; case ROMFH_SYM: i->i_op = &page_symlink_inode_operations; inode_nohighmem(i); i->i_data.a_ops = &romfs_aops; mode |= S_IRWXUGO; break; default: /* depending on MBZ for sock/fifos */ nextfh = be32_to_cpu(ri.spec); init_special_inode(i, mode, MKDEV(nextfh >> 16, nextfh & 0xffff)); break; } i->i_mode = mode; i->i_blocks = (i->i_size + 511) >> 9; unlock_new_inode(i); return i; eio: ret = -EIO; error: pr_err("read error for inode 0x%lx\n", pos); return ERR_PTR(ret); } /* * allocate a new inode */ static struct inode *romfs_alloc_inode(struct super_block *sb) { struct romfs_inode_info *inode; inode = alloc_inode_sb(sb, romfs_inode_cachep, GFP_KERNEL); return inode ? 
&inode->vfs_inode : NULL; } /* * return a spent inode to the slab cache */ static void romfs_free_inode(struct inode *inode) { kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); } /* * get filesystem statistics */ static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; u64 id = 0; /* When calling huge_encode_dev(), * use sb->s_bdev->bd_dev when, * - CONFIG_ROMFS_ON_BLOCK defined * use sb->s_dev when, * - CONFIG_ROMFS_ON_BLOCK undefined and * - CONFIG_ROMFS_ON_MTD defined * leave id as 0 when, * - CONFIG_ROMFS_ON_BLOCK undefined and * - CONFIG_ROMFS_ON_MTD undefined */ if (sb->s_bdev) id = huge_encode_dev(sb->s_bdev->bd_dev); else if (sb->s_dev) id = huge_encode_dev(sb->s_dev); buf->f_type = ROMFS_MAGIC; buf->f_namelen = ROMFS_MAXFN; buf->f_bsize = ROMBSIZE; buf->f_bfree = buf->f_bavail = buf->f_ffree; buf->f_blocks = (romfs_maxsize(dentry->d_sb) + ROMBSIZE - 1) >> ROMBSBITS; buf->f_fsid = u64_to_fsid(id); return 0; } /* * remounting must involve read-only */ static int romfs_reconfigure(struct fs_context *fc) { sync_filesystem(fc->root->d_sb); fc->sb_flags |= SB_RDONLY; return 0; } static const struct super_operations romfs_super_ops = { .alloc_inode = romfs_alloc_inode, .free_inode = romfs_free_inode, .statfs = romfs_statfs, }; /* * checksum check on part of a romfs filesystem */ static __u32 romfs_checksum(const void *data, int size) { const __be32 *ptr = data; __u32 sum; sum = 0; size >>= 2; while (size > 0) { sum += be32_to_cpu(*ptr++); size--; } return sum; } /* * fill in the superblock */ static int romfs_fill_super(struct super_block *sb, struct fs_context *fc) { struct romfs_super_block *rsb; struct inode *root; unsigned long pos, img_size; const char *storage; size_t len; int ret; #ifdef CONFIG_BLOCK if (!sb->s_mtd) { sb_set_blocksize(sb, ROMBSIZE); } else { sb->s_blocksize = ROMBSIZE; sb->s_blocksize_bits = blksize_bits(ROMBSIZE); } #endif sb->s_maxbytes = 0xFFFFFFFF; sb->s_magic = ROMFS_MAGIC; sb->s_flags |= SB_RDONLY | SB_NOATIME; sb->s_time_min = 0; sb->s_time_max = 0; sb->s_op = &romfs_super_ops; #ifdef CONFIG_ROMFS_ON_MTD /* Use same dev ID from the underlying mtdblock device */ if (sb->s_mtd) sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index); #endif /* read the image superblock and check it */ rsb = kmalloc(512, GFP_KERNEL); if (!rsb) return -ENOMEM; sb->s_fs_info = (void *) 512; ret = romfs_dev_read(sb, 0, rsb, 512); if (ret < 0) goto error_rsb; img_size = be32_to_cpu(rsb->size); if (sb->s_mtd && img_size > sb->s_mtd->size) goto error_rsb_inval; sb->s_fs_info = (void *) img_size; if (rsb->word0 != ROMSB_WORD0 || rsb->word1 != ROMSB_WORD1 || img_size < ROMFH_SIZE) { if (!(fc->sb_flags & SB_SILENT)) errorf(fc, "VFS: Can't find a romfs filesystem on dev %s.\n", sb->s_id); goto error_rsb_inval; } if (romfs_checksum(rsb, min_t(size_t, img_size, 512))) { pr_err("bad initial checksum on dev %s.\n", sb->s_id); goto error_rsb_inval; } storage = sb->s_mtd ? 
"MTD" : "the block layer"; len = strnlen(rsb->name, ROMFS_MAXFN); if (!(fc->sb_flags & SB_SILENT)) pr_notice("Mounting image '%*.*s' through %s\n", (unsigned) len, (unsigned) len, rsb->name, storage); kfree(rsb); rsb = NULL; /* find the root directory */ pos = (ROMFH_SIZE + len + 1 + ROMFH_PAD) & ROMFH_MASK; root = romfs_iget(sb, pos); if (IS_ERR(root)) return PTR_ERR(root); sb->s_root = d_make_root(root); if (!sb->s_root) return -ENOMEM; return 0; error_rsb_inval: ret = -EINVAL; error_rsb: kfree(rsb); return ret; } /* * get a superblock for mounting */ static int romfs_get_tree(struct fs_context *fc) { int ret = -EINVAL; #ifdef CONFIG_ROMFS_ON_MTD ret = get_tree_mtd(fc, romfs_fill_super); #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (ret == -EINVAL) ret = get_tree_bdev(fc, romfs_fill_super); #endif return ret; } static const struct fs_context_operations romfs_context_ops = { .get_tree = romfs_get_tree, .reconfigure = romfs_reconfigure, }; /* * Set up the filesystem mount context. */ static int romfs_init_fs_context(struct fs_context *fc) { fc->ops = &romfs_context_ops; return 0; } /* * destroy a romfs superblock in the appropriate manner */ static void romfs_kill_sb(struct super_block *sb) { generic_shutdown_super(sb); #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) { put_mtd_device(sb->s_mtd); sb->s_mtd = NULL; } #endif #ifdef CONFIG_ROMFS_ON_BLOCK if (sb->s_bdev) { sync_blockdev(sb->s_bdev); bdev_fput(sb->s_bdev_file); } #endif } static struct file_system_type romfs_fs_type = { .owner = THIS_MODULE, .name = "romfs", .init_fs_context = romfs_init_fs_context, .kill_sb = romfs_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("romfs"); /* * inode storage initialiser */ static void romfs_i_init_once(void *_inode) { struct romfs_inode_info *inode = _inode; inode_init_once(&inode->vfs_inode); } /* * romfs module initialisation */ static int __init init_romfs_fs(void) { int ret; pr_info("ROMFS MTD (C) 2007 Red Hat, Inc.\n"); romfs_inode_cachep = kmem_cache_create("romfs_i", sizeof(struct romfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, romfs_i_init_once); if (!romfs_inode_cachep) { pr_err("Failed to initialise inode cache\n"); return -ENOMEM; } ret = register_filesystem(&romfs_fs_type); if (ret) { pr_err("Failed to register filesystem\n"); goto error_register; } return 0; error_register: kmem_cache_destroy(romfs_inode_cachep); return ret; } /* * romfs module removal */ static void __exit exit_romfs_fs(void) { unregister_filesystem(&romfs_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(romfs_inode_cachep); } module_init(init_romfs_fs); module_exit(exit_romfs_fs); MODULE_DESCRIPTION("Direct-MTD Capable RomFS"); MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); /* Actually dual-licensed, but it doesn't matter for */
// SPDX-License-Identifier: GPL-2.0-only /* * hwmon.c - part of lm_sensors, Linux kernel modules for hardware monitoring * * This file defines the sysfs class "hwmon", for use by sensors drivers. * * Copyright (C) 2005 Mark M. 
Hoffman <mhoffman@lightlink.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitops.h> #include <linux/device.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/hwmon.h> #include <linux/i2c.h> #include <linux/idr.h> #include <linux/kstrtox.h> #include <linux/list.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/property.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/thermal.h> #define CREATE_TRACE_POINTS #include <trace/events/hwmon.h> #define HWMON_ID_PREFIX "hwmon" #define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d" struct hwmon_device { const char *name; const char *label; struct device dev; const struct hwmon_chip_info *chip; struct list_head tzdata; struct attribute_group group; const struct attribute_group **groups; }; #define to_hwmon_device(d) container_of(d, struct hwmon_device, dev) #define MAX_SYSFS_ATTR_NAME_LENGTH 32 struct hwmon_device_attribute { struct device_attribute dev_attr; const struct hwmon_ops *ops; enum hwmon_sensor_types type; u32 attr; int index; char name[MAX_SYSFS_ATTR_NAME_LENGTH]; }; #define to_hwmon_attr(d) \ container_of(d, struct hwmon_device_attribute, dev_attr) #define to_dev_attr(a) container_of(a, struct device_attribute, attr) /* * Thermal zone information */ struct hwmon_thermal_data { struct list_head node; /* hwmon tzdata list entry */ struct device *dev; /* Reference to hwmon device */ int index; /* sensor index */ struct thermal_zone_device *tzd;/* thermal zone device */ }; static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", to_hwmon_device(dev)->name); } static DEVICE_ATTR_RO(name); static ssize_t label_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", to_hwmon_device(dev)->label); } static DEVICE_ATTR_RO(label); static struct attribute *hwmon_dev_attrs[] = { &dev_attr_name.attr, &dev_attr_label.attr, NULL }; static umode_t hwmon_dev_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { struct device *dev = kobj_to_dev(kobj); struct hwmon_device *hdev = to_hwmon_device(dev); if (attr == &dev_attr_name.attr && hdev->name == NULL) return 0; if (attr == &dev_attr_label.attr && hdev->label == NULL) return 0; return attr->mode; } static const struct attribute_group hwmon_dev_attr_group = { .attrs = hwmon_dev_attrs, .is_visible = hwmon_dev_attr_is_visible, }; static const struct attribute_group *hwmon_dev_attr_groups[] = { &hwmon_dev_attr_group, NULL }; static void hwmon_free_attrs(struct attribute **attrs) { int i; for (i = 0; attrs[i]; i++) { struct device_attribute *dattr = to_dev_attr(attrs[i]); struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr); kfree(hattr); } kfree(attrs); } static void hwmon_dev_release(struct device *dev) { struct hwmon_device *hwdev = to_hwmon_device(dev); if (hwdev->group.attrs) hwmon_free_attrs(hwdev->group.attrs); kfree(hwdev->groups); kfree(hwdev->label); kfree(hwdev); } static const struct class hwmon_class = { .name = "hwmon", .dev_groups = hwmon_dev_attr_groups, .dev_release = hwmon_dev_release, }; static DEFINE_IDA(hwmon_ida); static umode_t hwmon_is_visible(const struct hwmon_ops *ops, const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel) { if (ops->visible) return ops->visible; return ops->is_visible(drvdata, type, attr, channel); } /* Thermal zone handling */ static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp) { struct hwmon_thermal_data *tdata = 
thermal_zone_device_priv(tz); struct hwmon_device *hwdev = to_hwmon_device(tdata->dev); int ret; long t; ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input, tdata->index, &t); if (ret < 0) return ret; *temp = t; return 0; } static int hwmon_thermal_set_trips(struct thermal_zone_device *tz, int low, int high) { struct hwmon_thermal_data *tdata = thermal_zone_device_priv(tz); struct hwmon_device *hwdev = to_hwmon_device(tdata->dev); const struct hwmon_chip_info *chip = hwdev->chip; const struct hwmon_channel_info * const *info = chip->info; unsigned int i; int err; if (!chip->ops->write) return 0; for (i = 0; info[i] && info[i]->type != hwmon_temp; i++) continue; if (!info[i]) return 0; if (info[i]->config[tdata->index] & HWMON_T_MIN) { err = chip->ops->write(tdata->dev, hwmon_temp, hwmon_temp_min, tdata->index, low); if (err && err != -EOPNOTSUPP) return err; } if (info[i]->config[tdata->index] & HWMON_T_MAX) { err = chip->ops->write(tdata->dev, hwmon_temp, hwmon_temp_max, tdata->index, high); if (err && err != -EOPNOTSUPP) return err; } return 0; } static const struct thermal_zone_device_ops hwmon_thermal_ops = { .get_temp = hwmon_thermal_get_temp, .set_trips = hwmon_thermal_set_trips, }; static void hwmon_thermal_remove_sensor(void *data) { list_del(data); } static int hwmon_thermal_add_sensor(struct device *dev, int index) { struct hwmon_device *hwdev = to_hwmon_device(dev); struct hwmon_thermal_data *tdata; struct thermal_zone_device *tzd; int err; tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL); if (!tdata) return -ENOMEM; tdata->dev = dev; tdata->index = index; tzd = devm_thermal_of_zone_register(dev, index, tdata, &hwmon_thermal_ops); if (IS_ERR(tzd)) { if (PTR_ERR(tzd) != -ENODEV) return PTR_ERR(tzd); dev_info(dev, "temp%d_input not attached to any thermal zone\n", index + 1); devm_kfree(dev, tdata); return 0; } err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); if (err) return err; tdata->tzd = tzd; list_add(&tdata->node, &hwdev->tzdata); return 0; } static int hwmon_thermal_register_sensors(struct device *dev) { struct hwmon_device *hwdev = to_hwmon_device(dev); const struct hwmon_chip_info *chip = hwdev->chip; const struct hwmon_channel_info * const *info = chip->info; void *drvdata = dev_get_drvdata(dev); int i; if (!IS_ENABLED(CONFIG_THERMAL_OF)) return 0; for (i = 1; info[i]; i++) { int j; if (info[i]->type != hwmon_temp) continue; for (j = 0; info[i]->config[j]; j++) { int err; if (!(info[i]->config[j] & HWMON_T_INPUT) || !hwmon_is_visible(chip->ops, drvdata, hwmon_temp, hwmon_temp_input, j)) continue; err = hwmon_thermal_add_sensor(dev, j); if (err) return err; } } return 0; } static void hwmon_thermal_notify(struct device *dev, int index) { struct hwmon_device *hwdev = to_hwmon_device(dev); struct hwmon_thermal_data *tzdata; if (!IS_ENABLED(CONFIG_THERMAL_OF)) return; list_for_each_entry(tzdata, &hwdev->tzdata, node) { if (tzdata->index == index) { thermal_zone_device_update(tzdata->tzd, THERMAL_EVENT_UNSPECIFIED); } } } static int hwmon_attr_base(enum hwmon_sensor_types type) { if (type == hwmon_in || type == hwmon_intrusion) return 0; return 1; } #if IS_REACHABLE(CONFIG_I2C) /* * PEC support * * The 'pec' attribute is attached to I2C client devices. It is only provided * if the i2c controller supports PEC. * * The mutex ensures that PEC configuration between i2c device and the hardware * is consistent. 
Use a single mutex because attribute writes are supposed to be * rare, and maintaining a separate mutex for each hardware monitoring device * would add substantial complexity to the driver for little if any gain. * * The hardware monitoring device is identified as child of the i2c client * device. This assumes that only a single hardware monitoring device is * attached to an i2c client device. */ static DEFINE_MUTEX(hwmon_pec_mutex); static int hwmon_match_device(struct device *dev, const void *data) { return dev->class == &hwmon_class; } static ssize_t pec_show(struct device *dev, struct device_attribute *dummy, char *buf) { struct i2c_client *client = to_i2c_client(dev); return sysfs_emit(buf, "%d\n", !!(client->flags & I2C_CLIENT_PEC)); } static ssize_t pec_store(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct hwmon_device *hwdev; struct device *hdev; bool val; int err; err = kstrtobool(buf, &val); if (err < 0) return err; hdev = device_find_child(dev, NULL, hwmon_match_device); if (!hdev) return -ENODEV; mutex_lock(&hwmon_pec_mutex); /* * If there is no write function, we assume that chip specific * handling is not required. */ hwdev = to_hwmon_device(hdev); if (hwdev->chip->ops->write) { err = hwdev->chip->ops->write(hdev, hwmon_chip, hwmon_chip_pec, 0, val); if (err && err != -EOPNOTSUPP) goto unlock; } if (!val) client->flags &= ~I2C_CLIENT_PEC; else client->flags |= I2C_CLIENT_PEC; err = count; unlock: mutex_unlock(&hwmon_pec_mutex); put_device(hdev); return err; } static DEVICE_ATTR_RW(pec); static void hwmon_remove_pec(void *dev) { device_remove_file(dev, &dev_attr_pec); } static int hwmon_pec_register(struct device *hdev) { struct i2c_client *client = i2c_verify_client(hdev->parent); int err; if (!client) return -EINVAL; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_PEC)) return 0; err = device_create_file(&client->dev, &dev_attr_pec); if (err) return err; return devm_add_action_or_reset(hdev, hwmon_remove_pec, &client->dev); } #else /* CONFIG_I2C */ static int hwmon_pec_register(struct device *hdev) { return -EINVAL; } #endif /* CONFIG_I2C */ /* sysfs attribute management */ static ssize_t hwmon_attr_show(struct device *dev, struct device_attribute *devattr, char *buf) { struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr); long val; int ret; ret = hattr->ops->read(dev, hattr->type, hattr->attr, hattr->index, &val); if (ret < 0) return ret; trace_hwmon_attr_show(hattr->index + hwmon_attr_base(hattr->type), hattr->name, val); return sprintf(buf, "%ld\n", val); } static ssize_t hwmon_attr_show_string(struct device *dev, struct device_attribute *devattr, char *buf) { struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr); enum hwmon_sensor_types type = hattr->type; const char *s; int ret; ret = hattr->ops->read_string(dev, hattr->type, hattr->attr, hattr->index, &s); if (ret < 0) return ret; trace_hwmon_attr_show_string(hattr->index + hwmon_attr_base(type), hattr->name, s); return sprintf(buf, "%s\n", s); } static ssize_t hwmon_attr_store(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr); long val; int ret; ret = kstrtol(buf, 10, &val); if (ret < 0) return ret; ret = hattr->ops->write(dev, hattr->type, hattr->attr, hattr->index, val); if (ret < 0) return ret; trace_hwmon_attr_store(hattr->index + hwmon_attr_base(hattr->type), hattr->name, val); return count; } 
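/*
 * Illustrative sketch (the "demo" names and the fixed temperature value are
 * assumptions, and such code would live in a separate driver rather than in
 * this file): a minimal chip description whose ->read() callback is what
 * hwmon_attr_show() above ends up invoking when user space reads
 * temp1_input.  Assumes <linux/hwmon.h>.
 */
static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return 0444;	/* every advertised attribute is world-readable */
}

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = 42000;	/* millidegrees Celsius, made up for the sketch */
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct hwmon_ops demo_hwmon_ops = {
	.is_visible = demo_is_visible,
	.read = demo_read,
};

static const struct hwmon_channel_info * const demo_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info demo_chip_info = {
	.ops = &demo_hwmon_ops,
	.info = demo_info,
};

/* In the owning driver's probe():
 *	hwdev = devm_hwmon_device_register_with_info(dev, "demo", NULL,
 *						      &demo_chip_info, NULL);
 */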
static bool is_string_attr(enum hwmon_sensor_types type, u32 attr) { return (type == hwmon_temp && attr == hwmon_temp_label) || (type == hwmon_in && attr == hwmon_in_label) || (type == hwmon_curr && attr == hwmon_curr_label) || (type == hwmon_power && attr == hwmon_power_label) || (type == hwmon_energy && attr == hwmon_energy_label) || (type == hwmon_humidity && attr == hwmon_humidity_label) || (type == hwmon_fan && attr == hwmon_fan_label); } static struct attribute *hwmon_genattr(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int index, const char *template, const struct hwmon_ops *ops) { struct hwmon_device_attribute *hattr; struct device_attribute *dattr; struct attribute *a; umode_t mode; const char *name; bool is_string = is_string_attr(type, attr); mode = hwmon_is_visible(ops, drvdata, type, attr, index); if (!mode) return ERR_PTR(-ENOENT); if ((mode & 0444) && ((is_string && !ops->read_string) || (!is_string && !ops->read))) return ERR_PTR(-EINVAL); if ((mode & 0222) && !ops->write) return ERR_PTR(-EINVAL); hattr = kzalloc(sizeof(*hattr), GFP_KERNEL); if (!hattr) return ERR_PTR(-ENOMEM); if (type == hwmon_chip) { name = template; } else { scnprintf(hattr->name, sizeof(hattr->name), template, index + hwmon_attr_base(type)); name = hattr->name; } hattr->type = type; hattr->attr = attr; hattr->index = index; hattr->ops = ops; dattr = &hattr->dev_attr; dattr->show = is_string ? hwmon_attr_show_string : hwmon_attr_show; dattr->store = hwmon_attr_store; a = &dattr->attr; sysfs_attr_init(a); a->name = name; a->mode = mode; return a; } /* * Chip attributes are not attribute templates but actual sysfs attributes. * See hwmon_genattr() for special handling. */ static const char * const hwmon_chip_attrs[] = { [hwmon_chip_temp_reset_history] = "temp_reset_history", [hwmon_chip_in_reset_history] = "in_reset_history", [hwmon_chip_curr_reset_history] = "curr_reset_history", [hwmon_chip_power_reset_history] = "power_reset_history", [hwmon_chip_update_interval] = "update_interval", [hwmon_chip_alarms] = "alarms", [hwmon_chip_samples] = "samples", [hwmon_chip_curr_samples] = "curr_samples", [hwmon_chip_in_samples] = "in_samples", [hwmon_chip_power_samples] = "power_samples", [hwmon_chip_temp_samples] = "temp_samples", [hwmon_chip_beep_enable] = "beep_enable", }; static const char * const hwmon_temp_attr_templates[] = { [hwmon_temp_enable] = "temp%d_enable", [hwmon_temp_input] = "temp%d_input", [hwmon_temp_type] = "temp%d_type", [hwmon_temp_lcrit] = "temp%d_lcrit", [hwmon_temp_lcrit_hyst] = "temp%d_lcrit_hyst", [hwmon_temp_min] = "temp%d_min", [hwmon_temp_min_hyst] = "temp%d_min_hyst", [hwmon_temp_max] = "temp%d_max", [hwmon_temp_max_hyst] = "temp%d_max_hyst", [hwmon_temp_crit] = "temp%d_crit", [hwmon_temp_crit_hyst] = "temp%d_crit_hyst", [hwmon_temp_emergency] = "temp%d_emergency", [hwmon_temp_emergency_hyst] = "temp%d_emergency_hyst", [hwmon_temp_alarm] = "temp%d_alarm", [hwmon_temp_lcrit_alarm] = "temp%d_lcrit_alarm", [hwmon_temp_min_alarm] = "temp%d_min_alarm", [hwmon_temp_max_alarm] = "temp%d_max_alarm", [hwmon_temp_crit_alarm] = "temp%d_crit_alarm", [hwmon_temp_emergency_alarm] = "temp%d_emergency_alarm", [hwmon_temp_fault] = "temp%d_fault", [hwmon_temp_offset] = "temp%d_offset", [hwmon_temp_label] = "temp%d_label", [hwmon_temp_lowest] = "temp%d_lowest", [hwmon_temp_highest] = "temp%d_highest", [hwmon_temp_reset_history] = "temp%d_reset_history", [hwmon_temp_rated_min] = "temp%d_rated_min", [hwmon_temp_rated_max] = "temp%d_rated_max", [hwmon_temp_beep] = "temp%d_beep", }; static 
const char * const hwmon_in_attr_templates[] = { [hwmon_in_enable] = "in%d_enable", [hwmon_in_input] = "in%d_input", [hwmon_in_min] = "in%d_min", [hwmon_in_max] = "in%d_max", [hwmon_in_lcrit] = "in%d_lcrit", [hwmon_in_crit] = "in%d_crit", [hwmon_in_average] = "in%d_average", [hwmon_in_lowest] = "in%d_lowest", [hwmon_in_highest] = "in%d_highest", [hwmon_in_reset_history] = "in%d_reset_history", [hwmon_in_label] = "in%d_label", [hwmon_in_alarm] = "in%d_alarm", [hwmon_in_min_alarm] = "in%d_min_alarm", [hwmon_in_max_alarm] = "in%d_max_alarm", [hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm", [hwmon_in_crit_alarm] = "in%d_crit_alarm", [hwmon_in_rated_min] = "in%d_rated_min", [hwmon_in_rated_max] = "in%d_rated_max", [hwmon_in_beep] = "in%d_beep", [hwmon_in_fault] = "in%d_fault", }; static const char * const hwmon_curr_attr_templates[] = { [hwmon_curr_enable] = "curr%d_enable", [hwmon_curr_input] = "curr%d_input", [hwmon_curr_min] = "curr%d_min", [hwmon_curr_max] = "curr%d_max", [hwmon_curr_lcrit] = "curr%d_lcrit", [hwmon_curr_crit] = "curr%d_crit", [hwmon_curr_average] = "curr%d_average", [hwmon_curr_lowest] = "curr%d_lowest", [hwmon_curr_highest] = "curr%d_highest", [hwmon_curr_reset_history] = "curr%d_reset_history", [hwmon_curr_label] = "curr%d_label", [hwmon_curr_alarm] = "curr%d_alarm", [hwmon_curr_min_alarm] = "curr%d_min_alarm", [hwmon_curr_max_alarm] = "curr%d_max_alarm", [hwmon_curr_lcrit_alarm] = "curr%d_lcrit_alarm", [hwmon_curr_crit_alarm] = "curr%d_crit_alarm", [hwmon_curr_rated_min] = "curr%d_rated_min", [hwmon_curr_rated_max] = "curr%d_rated_max", [hwmon_curr_beep] = "curr%d_beep", }; static const char * const hwmon_power_attr_templates[] = { [hwmon_power_enable] = "power%d_enable", [hwmon_power_average] = "power%d_average", [hwmon_power_average_interval] = "power%d_average_interval", [hwmon_power_average_interval_max] = "power%d_interval_max", [hwmon_power_average_interval_min] = "power%d_interval_min", [hwmon_power_average_highest] = "power%d_average_highest", [hwmon_power_average_lowest] = "power%d_average_lowest", [hwmon_power_average_max] = "power%d_average_max", [hwmon_power_average_min] = "power%d_average_min", [hwmon_power_input] = "power%d_input", [hwmon_power_input_highest] = "power%d_input_highest", [hwmon_power_input_lowest] = "power%d_input_lowest", [hwmon_power_reset_history] = "power%d_reset_history", [hwmon_power_accuracy] = "power%d_accuracy", [hwmon_power_cap] = "power%d_cap", [hwmon_power_cap_hyst] = "power%d_cap_hyst", [hwmon_power_cap_max] = "power%d_cap_max", [hwmon_power_cap_min] = "power%d_cap_min", [hwmon_power_min] = "power%d_min", [hwmon_power_max] = "power%d_max", [hwmon_power_lcrit] = "power%d_lcrit", [hwmon_power_crit] = "power%d_crit", [hwmon_power_label] = "power%d_label", [hwmon_power_alarm] = "power%d_alarm", [hwmon_power_cap_alarm] = "power%d_cap_alarm", [hwmon_power_min_alarm] = "power%d_min_alarm", [hwmon_power_max_alarm] = "power%d_max_alarm", [hwmon_power_lcrit_alarm] = "power%d_lcrit_alarm", [hwmon_power_crit_alarm] = "power%d_crit_alarm", [hwmon_power_rated_min] = "power%d_rated_min", [hwmon_power_rated_max] = "power%d_rated_max", }; static const char * const hwmon_energy_attr_templates[] = { [hwmon_energy_enable] = "energy%d_enable", [hwmon_energy_input] = "energy%d_input", [hwmon_energy_label] = "energy%d_label", }; static const char * const hwmon_humidity_attr_templates[] = { [hwmon_humidity_enable] = "humidity%d_enable", [hwmon_humidity_input] = "humidity%d_input", [hwmon_humidity_label] = "humidity%d_label", [hwmon_humidity_min] = 
"humidity%d_min", [hwmon_humidity_min_hyst] = "humidity%d_min_hyst", [hwmon_humidity_max] = "humidity%d_max", [hwmon_humidity_max_hyst] = "humidity%d_max_hyst", [hwmon_humidity_alarm] = "humidity%d_alarm", [hwmon_humidity_fault] = "humidity%d_fault", [hwmon_humidity_rated_min] = "humidity%d_rated_min", [hwmon_humidity_rated_max] = "humidity%d_rated_max", [hwmon_humidity_min_alarm] = "humidity%d_min_alarm", [hwmon_humidity_max_alarm] = "humidity%d_max_alarm", }; static const char * const hwmon_fan_attr_templates[] = { [hwmon_fan_enable] = "fan%d_enable", [hwmon_fan_input] = "fan%d_input", [hwmon_fan_label] = "fan%d_label", [hwmon_fan_min] = "fan%d_min", [hwmon_fan_max] = "fan%d_max", [hwmon_fan_div] = "fan%d_div", [hwmon_fan_pulses] = "fan%d_pulses", [hwmon_fan_target] = "fan%d_target", [hwmon_fan_alarm] = "fan%d_alarm", [hwmon_fan_min_alarm] = "fan%d_min_alarm", [hwmon_fan_max_alarm] = "fan%d_max_alarm", [hwmon_fan_fault] = "fan%d_fault", [hwmon_fan_beep] = "fan%d_beep", }; static const char * const hwmon_pwm_attr_templates[] = { [hwmon_pwm_input] = "pwm%d", [hwmon_pwm_enable] = "pwm%d_enable", [hwmon_pwm_mode] = "pwm%d_mode", [hwmon_pwm_freq] = "pwm%d_freq", [hwmon_pwm_auto_channels_temp] = "pwm%d_auto_channels_temp", }; static const char * const hwmon_intrusion_attr_templates[] = { [hwmon_intrusion_alarm] = "intrusion%d_alarm", [hwmon_intrusion_beep] = "intrusion%d_beep", }; static const char * const *__templates[] = { [hwmon_chip] = hwmon_chip_attrs, [hwmon_temp] = hwmon_temp_attr_templates, [hwmon_in] = hwmon_in_attr_templates, [hwmon_curr] = hwmon_curr_attr_templates, [hwmon_power] = hwmon_power_attr_templates, [hwmon_energy] = hwmon_energy_attr_templates, [hwmon_humidity] = hwmon_humidity_attr_templates, [hwmon_fan] = hwmon_fan_attr_templates, [hwmon_pwm] = hwmon_pwm_attr_templates, [hwmon_intrusion] = hwmon_intrusion_attr_templates, }; static const int __templates_size[] = { [hwmon_chip] = ARRAY_SIZE(hwmon_chip_attrs), [hwmon_temp] = ARRAY_SIZE(hwmon_temp_attr_templates), [hwmon_in] = ARRAY_SIZE(hwmon_in_attr_templates), [hwmon_curr] = ARRAY_SIZE(hwmon_curr_attr_templates), [hwmon_power] = ARRAY_SIZE(hwmon_power_attr_templates), [hwmon_energy] = ARRAY_SIZE(hwmon_energy_attr_templates), [hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates), [hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates), [hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates), [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates), }; int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel) { char event[MAX_SYSFS_ATTR_NAME_LENGTH + 5]; char sattr[MAX_SYSFS_ATTR_NAME_LENGTH]; char *envp[] = { event, NULL }; const char * const *templates; const char *template; int base; if (type >= ARRAY_SIZE(__templates)) return -EINVAL; if (attr >= __templates_size[type]) return -EINVAL; templates = __templates[type]; template = templates[attr]; base = hwmon_attr_base(type); scnprintf(sattr, MAX_SYSFS_ATTR_NAME_LENGTH, template, base + channel); scnprintf(event, sizeof(event), "NAME=%s", sattr); sysfs_notify(&dev->kobj, NULL, sattr); kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); if (type == hwmon_temp) hwmon_thermal_notify(dev, channel); return 0; } EXPORT_SYMBOL_GPL(hwmon_notify_event); static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) { int i, n; for (i = n = 0; info->config[i]; i++) n += hweight32(info->config[i]); return n; } static int hwmon_genattrs(const void *drvdata, struct attribute **attrs, const struct hwmon_ops *ops, const struct 
hwmon_channel_info *info) { const char * const *templates; int template_size; int i, aindex = 0; if (info->type >= ARRAY_SIZE(__templates)) return -EINVAL; templates = __templates[info->type]; template_size = __templates_size[info->type]; for (i = 0; info->config[i]; i++) { u32 attr_mask = info->config[i]; u32 attr; while (attr_mask) { struct attribute *a; attr = __ffs(attr_mask); attr_mask &= ~BIT(attr); if (attr >= template_size || !templates[attr]) continue; /* attribute is invisible */ a = hwmon_genattr(drvdata, info->type, attr, i, templates[attr], ops); if (IS_ERR(a)) { if (PTR_ERR(a) != -ENOENT) return PTR_ERR(a); continue; } attrs[aindex++] = a; } } return aindex; } static struct attribute ** __hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip) { int ret, i, aindex = 0, nattrs = 0; struct attribute **attrs; for (i = 0; chip->info[i]; i++) nattrs += hwmon_num_channel_attrs(chip->info[i]); if (nattrs == 0) return ERR_PTR(-EINVAL); attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL); if (!attrs) return ERR_PTR(-ENOMEM); for (i = 0; chip->info[i]; i++) { ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops, chip->info[i]); if (ret < 0) { hwmon_free_attrs(attrs); return ERR_PTR(ret); } aindex += ret; } return attrs; } static struct device * __hwmon_device_register(struct device *dev, const char *name, void *drvdata, const struct hwmon_chip_info *chip, const struct attribute_group **groups) { struct hwmon_device *hwdev; const char *label; struct device *hdev; struct device *tdev = dev; int i, err, id; /* Complain about invalid characters in hwmon name attribute */ if (name && (!strlen(name) || strpbrk(name, "-* \t\n"))) dev_warn(dev, "hwmon: '%s' is not a valid name attribute, please fix\n", name); id = ida_alloc(&hwmon_ida, GFP_KERNEL); if (id < 0) return ERR_PTR(id); hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); if (hwdev == NULL) { err = -ENOMEM; goto ida_remove; } hdev = &hwdev->dev; if (chip) { struct attribute **attrs; int ngroups = 2; /* terminating NULL plus &hwdev->groups */ if (groups) for (i = 0; groups[i]; i++) ngroups++; hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL); if (!hwdev->groups) { err = -ENOMEM; goto free_hwmon; } attrs = __hwmon_create_attrs(drvdata, chip); if (IS_ERR(attrs)) { err = PTR_ERR(attrs); goto free_hwmon; } hwdev->group.attrs = attrs; ngroups = 0; hwdev->groups[ngroups++] = &hwdev->group; if (groups) { for (i = 0; groups[i]; i++) hwdev->groups[ngroups++] = groups[i]; } hdev->groups = hwdev->groups; } else { hdev->groups = groups; } if (dev && device_property_present(dev, "label")) { err = device_property_read_string(dev, "label", &label); if (err < 0) goto free_hwmon; hwdev->label = kstrdup(label, GFP_KERNEL); if (hwdev->label == NULL) { err = -ENOMEM; goto free_hwmon; } } hwdev->name = name; hdev->class = &hwmon_class; hdev->parent = dev; while (tdev && !tdev->of_node) tdev = tdev->parent; hdev->of_node = tdev ? tdev->of_node : NULL; hwdev->chip = chip; dev_set_drvdata(hdev, drvdata); dev_set_name(hdev, HWMON_ID_FORMAT, id); err = device_register(hdev); if (err) { put_device(hdev); goto ida_remove; } INIT_LIST_HEAD(&hwdev->tzdata); if (hdev->of_node && chip && chip->ops->read && chip->info[0]->type == hwmon_chip) { u32 config = chip->info[0]->config[0]; if (config & HWMON_C_REGISTER_TZ) { err = hwmon_thermal_register_sensors(hdev); if (err) { device_unregister(hdev); /* * Don't worry about hwdev; hwmon_dev_release(), * called from device_unregister(), will free it. 
*/ goto ida_remove; } } if (config & HWMON_C_PEC) { err = hwmon_pec_register(hdev); if (err) { device_unregister(hdev); goto ida_remove; } } } return hdev; free_hwmon: hwmon_dev_release(hdev); ida_remove: ida_free(&hwmon_ida, id); return ERR_PTR(err); } /** * hwmon_device_register_with_groups - register w/ hwmon * @dev: the parent device * @name: hwmon name attribute * @drvdata: driver data to attach to created device * @groups: List of attribute groups to create * * hwmon_device_unregister() must be called when the device is no * longer needed. * * Returns the pointer to the new device. */ struct device * hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, const struct attribute_group **groups) { if (!name) return ERR_PTR(-EINVAL); return __hwmon_device_register(dev, name, drvdata, NULL, groups); } EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups); /** * hwmon_device_register_with_info - register w/ hwmon * @dev: the parent device (mandatory) * @name: hwmon name attribute (mandatory) * @drvdata: driver data to attach to created device (optional) * @chip: pointer to hwmon chip information (mandatory) * @extra_groups: pointer to list of additional non-standard attribute groups * (optional) * * hwmon_device_unregister() must be called when the device is no * longer needed. * * Returns the pointer to the new device. */ struct device * hwmon_device_register_with_info(struct device *dev, const char *name, void *drvdata, const struct hwmon_chip_info *chip, const struct attribute_group **extra_groups) { if (!dev || !name || !chip) return ERR_PTR(-EINVAL); if (!chip->ops || !(chip->ops->visible || chip->ops->is_visible) || !chip->info) return ERR_PTR(-EINVAL); return __hwmon_device_register(dev, name, drvdata, chip, extra_groups); } EXPORT_SYMBOL_GPL(hwmon_device_register_with_info); /** * hwmon_device_register_for_thermal - register hwmon device for thermal subsystem * @dev: the parent device * @name: hwmon name attribute * @drvdata: driver data to attach to created device * * The use of this function is restricted. It is provided for legacy reasons * and must only be called from the thermal subsystem. * * hwmon_device_unregister() must be called when the device is no * longer needed. * * Returns the pointer to the new device. */ struct device * hwmon_device_register_for_thermal(struct device *dev, const char *name, void *drvdata) { if (!name || !dev) return ERR_PTR(-EINVAL); return __hwmon_device_register(dev, name, drvdata, NULL, NULL); } EXPORT_SYMBOL_NS_GPL(hwmon_device_register_for_thermal, "HWMON_THERMAL"); /** * hwmon_device_register - register w/ hwmon * @dev: the device to register * * hwmon_device_unregister() must be called when the device is no * longer needed. * * Returns the pointer to the new device. */ struct device *hwmon_device_register(struct device *dev) { dev_warn(dev, "hwmon_device_register() is deprecated. 
Please convert the driver to use hwmon_device_register_with_info().\n"); return __hwmon_device_register(dev, NULL, NULL, NULL, NULL); } EXPORT_SYMBOL_GPL(hwmon_device_register); /** * hwmon_device_unregister - removes the previously registered class device * * @dev: the class device to destroy */ void hwmon_device_unregister(struct device *dev) { int id; if (likely(sscanf(dev_name(dev), HWMON_ID_FORMAT, &id) == 1)) { device_unregister(dev); ida_free(&hwmon_ida, id); } else dev_dbg(dev->parent, "hwmon_device_unregister() failed: bad class ID!\n"); } EXPORT_SYMBOL_GPL(hwmon_device_unregister); static void devm_hwmon_release(struct device *dev, void *res) { struct device *hwdev = *(struct device **)res; hwmon_device_unregister(hwdev); } /** * devm_hwmon_device_register_with_groups - register w/ hwmon * @dev: the parent device * @name: hwmon name attribute * @drvdata: driver data to attach to created device * @groups: List of attribute groups to create * * Returns the pointer to the new device. The new device is automatically * unregistered with the parent device. */ struct device * devm_hwmon_device_register_with_groups(struct device *dev, const char *name, void *drvdata, const struct attribute_group **groups) { struct device **ptr, *hwdev; if (!dev) return ERR_PTR(-EINVAL); ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups); if (IS_ERR(hwdev)) goto error; *ptr = hwdev; devres_add(dev, ptr); return hwdev; error: devres_free(ptr); return hwdev; } EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups); /** * devm_hwmon_device_register_with_info - register w/ hwmon * @dev: the parent device * @name: hwmon name attribute * @drvdata: driver data to attach to created device * @chip: pointer to hwmon chip information * @extra_groups: pointer to list of driver specific attribute groups * * Returns the pointer to the new device. The new device is automatically * unregistered with the parent device. */ struct device * devm_hwmon_device_register_with_info(struct device *dev, const char *name, void *drvdata, const struct hwmon_chip_info *chip, const struct attribute_group **extra_groups) { struct device **ptr, *hwdev; if (!dev) return ERR_PTR(-EINVAL); if (!name) { name = devm_hwmon_sanitize_name(dev, dev_name(dev)); if (IS_ERR(name)) return ERR_CAST(name); } ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM); hwdev = hwmon_device_register_with_info(dev, name, drvdata, chip, extra_groups); if (IS_ERR(hwdev)) goto error; *ptr = hwdev; devres_add(dev, ptr); return hwdev; error: devres_free(ptr); return hwdev; } EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_info); static char *__hwmon_sanitize_name(struct device *dev, const char *old_name) { char *name, *p; if (dev) name = devm_kstrdup(dev, old_name, GFP_KERNEL); else name = kstrdup(old_name, GFP_KERNEL); if (!name) return ERR_PTR(-ENOMEM); for (p = name; *p; p++) if (hwmon_is_bad_char(*p)) *p = '_'; return name; } /** * hwmon_sanitize_name - Replaces invalid characters in a hwmon name * @name: NUL-terminated name * * Allocates a new string where any invalid characters will be replaced * by an underscore. It is the responsibility of the caller to release * the memory. * * Returns newly allocated name, or ERR_PTR on error. 
*/ char *hwmon_sanitize_name(const char *name) { return __hwmon_sanitize_name(NULL, name); } EXPORT_SYMBOL_GPL(hwmon_sanitize_name); /** * devm_hwmon_sanitize_name - resource managed hwmon_sanitize_name() * @dev: device to allocate memory for * @name: NUL-terminated name * * Allocates a new string where any invalid characters will be replaced * by an underscore. * * Returns newly allocated name, or ERR_PTR on error. */ char *devm_hwmon_sanitize_name(struct device *dev, const char *name) { if (!dev) return ERR_PTR(-EINVAL); return __hwmon_sanitize_name(dev, name); } EXPORT_SYMBOL_GPL(devm_hwmon_sanitize_name); static void __init hwmon_pci_quirks(void) { #if defined CONFIG_X86 && defined CONFIG_PCI struct pci_dev *sb; u16 base; u8 enable; /* Open access to 0x295-0x296 on MSI MS-7031 */ sb = pci_get_device(PCI_VENDOR_ID_ATI, 0x436c, NULL); if (sb) { if (sb->subsystem_vendor == 0x1462 && /* MSI */ sb->subsystem_device == 0x0031) { /* MS-7031 */ pci_read_config_byte(sb, 0x48, &enable); pci_read_config_word(sb, 0x64, &base); if (base == 0 && !(enable & BIT(2))) { dev_info(&sb->dev, "Opening wide generic port at 0x295\n"); pci_write_config_word(sb, 0x64, 0x295); pci_write_config_byte(sb, 0x48, enable | BIT(2)); } } pci_dev_put(sb); } #endif } static int __init hwmon_init(void) { int err; hwmon_pci_quirks(); err = class_register(&hwmon_class); if (err) { pr_err("couldn't register hwmon sysfs class\n"); return err; } return 0; } static void __exit hwmon_exit(void) { class_unregister(&hwmon_class); } subsys_initcall(hwmon_init); module_exit(hwmon_exit); MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>"); MODULE_DESCRIPTION("hardware monitoring sysfs/class support"); MODULE_LICENSE("GPL");
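/*
 * Illustrative sketch (not part of hwmon.c): a minimal, hypothetical client
 * driver showing the registration API above in use.  One temperature channel
 * is registered through devm_hwmon_device_register_with_info(), and a
 * max-alarm event is pushed with hwmon_notify_event().  The foo_* names, the
 * fixed readings and the probe hook are assumptions; a real driver would read
 * its hardware and wire probe into its bus driver.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hwmon.h>

static umode_t foo_is_visible(const void *drvdata, enum hwmon_sensor_types type,
                              u32 attr, int channel)
{
        return 0444;    /* everything read-only in this sketch */
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
                    u32 attr, int channel, long *val)
{
        switch (attr) {
        case hwmon_temp_input:
                *val = 42000;   /* millidegree Celsius, made-up reading */
                return 0;
        case hwmon_temp_max:
                *val = 85000;
                return 0;
        case hwmon_temp_max_alarm:
                *val = 0;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static const struct hwmon_channel_info * const foo_info[] = {
        HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
        NULL
};

static const struct hwmon_ops foo_hwmon_ops = {
        .is_visible = foo_is_visible,
        .read = foo_read,
};

static const struct hwmon_chip_info foo_chip_info = {
        .ops = &foo_hwmon_ops,
        .info = foo_info,
};

static int foo_probe(struct device *dev)
{
        struct device *hwmon_dev;

        hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", NULL,
                                                          &foo_chip_info, NULL);
        if (IS_ERR(hwmon_dev))
                return PTR_ERR(hwmon_dev);

        /* In a real driver this would come from an interrupt handler. */
        hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_max_alarm, 0);
        return 0;
}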
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 */
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>

static bool nf_reject_v6_csum_ok(struct sk_buff *skb, int hook)
{
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        int thoff;
        __be16 fo;
        u8 proto = ip6h->nexthdr;

        if (skb_csum_unnecessary(skb))
                return true;

        if (ip6h->payload_len &&
            pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
                return false;

        ip6h = ipv6_hdr(skb);
        thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
        if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
                return false;

        if (!nf_reject_verify_csum(skb, thoff, proto))
                return true;

        return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}

static int nf_reject_ip6hdr_validate(struct sk_buff *skb)
{
        struct ipv6hdr *hdr;
        u32 pkt_len;

        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                return 0;

        hdr = ipv6_hdr(skb);
        if (hdr->version != 6)
                return 0;

        pkt_len = ntohs(hdr->payload_len);
        if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
                return 0;

        return 1;
}

struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
                                           struct sk_buff *oldskb,
                                           const struct net_device *dev,
                                           int hook)
{
        struct sk_buff *nskb;
        const struct tcphdr *oth;
        struct tcphdr _oth;
        unsigned int otcplen;
        struct ipv6hdr *nip6h;

        if (!nf_reject_ip6hdr_validate(oldskb))
                return NULL;

        oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
        if (!oth)
                return NULL;

        nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
                         LL_MAX_HEADER, GFP_ATOMIC);
        if (!nskb)
                return NULL;

        nskb->dev = (struct net_device *)dev;

        skb_reserve(nskb, LL_MAX_HEADER);
        nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
                                     READ_ONCE(net->ipv6.devconf_all->hop_limit));
        nf_reject_ip6_tcphdr_put(nskb,
oldskb, oth, otcplen); nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); return nskb; } EXPORT_SYMBOL_GPL(nf_reject_skb_v6_tcp_reset); struct sk_buff *nf_reject_skb_v6_unreach(struct net *net, struct sk_buff *oldskb, const struct net_device *dev, int hook, u8 code) { struct sk_buff *nskb; struct ipv6hdr *nip6h; struct icmp6hdr *icmp6h; unsigned int len; if (!nf_reject_ip6hdr_validate(oldskb)) return NULL; /* Include "As much of invoking packet as possible without the ICMPv6 * packet exceeding the minimum IPv6 MTU" in the ICMP payload. */ len = min_t(unsigned int, 1220, oldskb->len); if (!pskb_may_pull(oldskb, len)) return NULL; if (!nf_reject_v6_csum_ok(oldskb, hook)) return NULL; nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) + LL_MAX_HEADER + len, GFP_ATOMIC); if (!nskb) return NULL; nskb->dev = (struct net_device *)dev; skb_reserve(nskb, LL_MAX_HEADER); nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6, READ_ONCE(net->ipv6.devconf_all->hop_limit)); skb_reset_transport_header(nskb); icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr)); icmp6h->icmp6_type = ICMPV6_DEST_UNREACH; icmp6h->icmp6_code = code; skb_put_data(nskb, skb_network_header(oldskb), len); nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, nskb->len - sizeof(struct ipv6hdr), IPPROTO_ICMPV6, csum_partial(icmp6h, nskb->len - sizeof(struct ipv6hdr), 0)); return nskb; } EXPORT_SYMBOL_GPL(nf_reject_skb_v6_unreach); const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, struct tcphdr *otcph, unsigned int *otcplen, int hook) { const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); u8 proto; __be16 frag_off; int tcphoff; proto = oip6h->nexthdr; tcphoff = ipv6_skip_exthdr(oldskb, ((u8 *)(oip6h + 1) - oldskb->data), &proto, &frag_off); if ((tcphoff < 0) || (tcphoff > oldskb->len)) { pr_debug("Cannot get TCP header.\n"); return NULL; } *otcplen = oldskb->len - tcphoff; /* IP header checks: fragment, too short. */ if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) { pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n", proto, *otcplen); return NULL; } otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr), otcph); if (otcph == NULL) return NULL; /* No RST for RST. */ if (otcph->rst) { pr_debug("RST is set\n"); return NULL; } /* Check checksum. 
*/ if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { pr_debug("TCP checksum is invalid\n"); return NULL; } return otcph; } EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get); struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, __u8 protocol, int hoplimit) { struct ipv6hdr *ip6h; const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); #define DEFAULT_TOS_VALUE 0x0U const __u8 tclass = DEFAULT_TOS_VALUE; skb_put(nskb, sizeof(struct ipv6hdr)); skb_reset_network_header(nskb); ip6h = ipv6_hdr(nskb); ip6_flow_hdr(ip6h, tclass, 0); ip6h->hop_limit = hoplimit; ip6h->nexthdr = protocol; ip6h->saddr = oip6h->daddr; ip6h->daddr = oip6h->saddr; nskb->protocol = htons(ETH_P_IPV6); return ip6h; } EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put); void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, const struct tcphdr *oth, unsigned int otcplen) { struct tcphdr *tcph; skb_reset_transport_header(nskb); tcph = skb_put_zero(nskb, sizeof(struct tcphdr)); /* Truncate to length (no data) */ tcph->doff = sizeof(struct tcphdr)/4; tcph->source = oth->dest; tcph->dest = oth->source; if (oth->ack) { tcph->seq = oth->ack_seq; } else { tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + otcplen - (oth->doff<<2)); tcph->ack = 1; } tcph->rst = 1; /* Adjust TCP checksum */ tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, &ipv6_hdr(nskb)->daddr, sizeof(struct tcphdr), IPPROTO_TCP, csum_partial(tcph, sizeof(struct tcphdr), 0)); } EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put); static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in) { struct dst_entry *dst = NULL; struct flowi fl; memset(&fl, 0, sizeof(struct flowi)); fl.u.ip6.daddr = ipv6_hdr(skb_in)->saddr; nf_ip6_route(dev_net(skb_in->dev), &dst, &fl, false); if (!dst) return -1; skb_dst_set(skb_in, dst); return 0; } void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb, int hook) { const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); struct dst_entry *dst = NULL; const struct tcphdr *otcph; struct sk_buff *nskb; struct tcphdr _otcph; unsigned int otcplen; struct flowi6 fl6; if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { pr_debug("addr is not unicast.\n"); return; } otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook); if (!otcph) return; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; fl6.saddr = oip6h->daddr; fl6.daddr = oip6h->saddr; fl6.fl6_sport = otcph->dest; fl6.fl6_dport = otcph->source; if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) { nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false); if (!dst) return; skb_dst_set(oldskb, dst); } fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark); security_skb_classify_flow(oldskb, flowi6_to_flowi_common(&fl6)); dst = ip6_route_output(net, NULL, &fl6); if (dst->error) { dst_release(dst); return; } dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); if (IS_ERR(dst)) return; nskb = alloc_skb(LL_MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + dst->trailer_len, GFP_ATOMIC); if (!nskb) { net_dbg_ratelimited("cannot alloc skb\n"); dst_release(dst); return; } skb_dst_set(nskb, dst); nskb->mark = fl6.flowi6_mark; skb_reserve(nskb, LL_MAX_HEADER); nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, ip6_dst_hoplimit(dst)); nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen); nf_ct_attach(nskb, oldskb); nf_ct_set_closing(skb_nfct(oldskb)); #if 
IS_ENABLED(CONFIG_BRIDGE_NETFILTER) /* If we use ip6_local_out for bridged traffic, the MAC source on * the RST will be ours, instead of the destination's. This confuses * some routers/firewalls, and they drop the packet. So we need to * build the eth header using the original destination's MAC as the * source, and send the RST packet directly. */ if (nf_bridge_info_exists(oldskb)) { struct ethhdr *oeth = eth_hdr(oldskb); struct ipv6hdr *ip6h = ipv6_hdr(nskb); struct net_device *br_indev; br_indev = nf_bridge_get_physindev(oldskb, net); if (!br_indev) { kfree_skb(nskb); return; } nskb->dev = br_indev; nskb->protocol = htons(ETH_P_IPV6); ip6h->payload_len = htons(sizeof(struct tcphdr)); if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), oeth->h_source, oeth->h_dest, nskb->len) < 0) { kfree_skb(nskb); return; } dev_queue_xmit(nskb); } else #endif ip6_local_out(net, sk, nskb); } EXPORT_SYMBOL_GPL(nf_send_reset6); static bool reject6_csum_ok(struct sk_buff *skb, int hook) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); int thoff; __be16 fo; u8 proto; if (skb_csum_unnecessary(skb)) return true; proto = ip6h->nexthdr; thoff = ipv6_skip_exthdr(skb, ((u8 *)(ip6h + 1) - skb->data), &proto, &fo); if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) return false; if (!nf_reject_verify_csum(skb, thoff, proto)) return true; return nf_ip6_checksum(skb, hook, thoff, proto) == 0; } void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, unsigned int hooknum) { if (!reject6_csum_ok(skb_in, hooknum)) return; if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) skb_in->dev = net->loopback_dev; if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) && nf_reject6_fill_skb_dst(skb_in) < 0) return; icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); } EXPORT_SYMBOL_GPL(nf_send_unreach6); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IPv6 packet rejection core");
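/*
 * Illustrative sketch (not part of this file): a minimal, hypothetical
 * netfilter hook that answers forwarded IPv6 packets the way a REJECT rule
 * does, using nf_send_reset6() for TCP and nf_send_unreach6() for everything
 * else.  For brevity it looks only at the first next-header value and ignores
 * extension headers; hook registration is only hinted at in the trailing
 * comment.
 */
#include <linux/icmpv6.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/ipv6.h>
#include <net/netfilter/ipv6/nf_reject.h>

static unsigned int foo_reject_hook(void *priv, struct sk_buff *skb,
                                    const struct nf_hook_state *state)
{
        if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                nf_send_reset6(state->net, state->sk, skb, state->hook);
        else
                nf_send_unreach6(state->net, skb, ICMPV6_PORT_UNREACH,
                                 state->hook);

        return NF_DROP;
}

static const struct nf_hook_ops foo_reject_ops = {
        .hook           = foo_reject_hook,
        .pf             = NFPROTO_IPV6,
        .hooknum        = NF_INET_FORWARD,
        .priority       = NF_IP6_PRI_FILTER,
};
/* Registered per network namespace with nf_register_net_hook(net, &foo_reject_ops). */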
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2023 Isovalent */
#ifndef __BPF_MPROG_H
#define __BPF_MPROG_H

#include <linux/bpf.h>

/* bpf_mprog framework:
 *
 * bpf_mprog is a generic layer for multi-program attachment. In-kernel users
 * of the bpf_mprog don't need to care about the dependency resolution
 * internals, they can just consume it with few API calls. Currently available
 * dependency directives are BPF_F_{BEFORE,AFTER} which enable insertion of
 * a BPF program or BPF link relative to an existing BPF program or BPF link
 * inside the multi-program array as well as prepend and append behavior if
 * no relative object was specified, see corresponding selftests for concrete
 * examples (e.g. tc_links and tc_opts test cases of test_progs).
 *
 * Usage of bpf_mprog_{attach,detach,query}() core APIs with pseudo code:
 *
 *  Attach case:
 *
 *   struct bpf_mprog_entry *entry, *entry_new;
 *   int ret;
 *
 *   // bpf_mprog user-side lock
 *   // fetch active @entry from attach location
 *   [...]
 *   ret = bpf_mprog_attach(entry, &entry_new, [...]);
 *   if (!ret) {
 *       if (entry != entry_new) {
 *           // swap @entry to @entry_new at attach location
 *           // ensure there are no inflight users of @entry:
 *           synchronize_rcu();
 *       }
 *       bpf_mprog_commit(entry);
 *   } else {
 *       // error path, bail out, propagate @ret
 *   }
 *   // bpf_mprog user-side unlock
 *
 *  Detach case:
 *
 *   struct bpf_mprog_entry *entry, *entry_new;
 *   int ret;
 *
 *   // bpf_mprog user-side lock
 *   // fetch active @entry from attach location
 *   [...]
 *   ret = bpf_mprog_detach(entry, &entry_new, [...]);
 *   if (!ret) {
 *       // all (*) marked is optional and depends on the use-case
 *       // whether bpf_mprog_bundle should be freed or not
 *       if (!bpf_mprog_total(entry_new))     (*)
 *           entry_new = NULL                 (*)
 *       // swap @entry to @entry_new at attach location
 *       // ensure there are no inflight users of @entry:
 *       synchronize_rcu();
 *       bpf_mprog_commit(entry);
 *       if (!entry_new)                      (*)
 *           // free bpf_mprog_bundle         (*)
 *   } else {
 *       // error path, bail out, propagate @ret
 *   }
 *   // bpf_mprog user-side unlock
 *
 *  Query case:
 *
 *   struct bpf_mprog_entry *entry;
 *   int ret;
 *
 *   // bpf_mprog user-side lock
 *   // fetch active @entry from attach location
 *   [...]
* ret = bpf_mprog_query(attr, uattr, entry); * // bpf_mprog user-side unlock * * Data/fast path: * * struct bpf_mprog_entry *entry; * struct bpf_mprog_fp *fp; * struct bpf_prog *prog; * int ret = [...]; * * rcu_read_lock(); * // fetch active @entry from attach location * [...] * bpf_mprog_foreach_prog(entry, fp, prog) { * ret = bpf_prog_run(prog, [...]); * // process @ret from program * } * [...] * rcu_read_unlock(); * * bpf_mprog locking considerations: * * bpf_mprog_{attach,detach,query}() must be protected by an external lock * (like RTNL in case of tcx). * * bpf_mprog_entry pointer can be an __rcu annotated pointer (in case of tcx * the netdevice has tcx_ingress and tcx_egress __rcu pointer) which gets * updated via rcu_assign_pointer() pointing to the active bpf_mprog_entry of * the bpf_mprog_bundle. * * Fast path accesses the active bpf_mprog_entry within RCU critical section * (in case of tcx it runs in NAPI which provides RCU protection there, * other users might need explicit rcu_read_lock()). The bpf_mprog_commit() * assumes that for the old bpf_mprog_entry there are no inflight users * anymore. * * The READ_ONCE()/WRITE_ONCE() pairing for bpf_mprog_fp's prog access is for * the replacement case where we don't swap the bpf_mprog_entry. */ #define bpf_mprog_foreach_tuple(entry, fp, cp, t) \ for (fp = &entry->fp_items[0], cp = &entry->parent->cp_items[0];\ ({ \ t.prog = READ_ONCE(fp->prog); \ t.link = cp->link; \ t.prog; \ }); \ fp++, cp++) #define bpf_mprog_foreach_prog(entry, fp, p) \ for (fp = &entry->fp_items[0]; \ (p = READ_ONCE(fp->prog)); \ fp++) #define BPF_MPROG_MAX 64 struct bpf_mprog_fp { struct bpf_prog *prog; }; struct bpf_mprog_cp { struct bpf_link *link; }; struct bpf_mprog_entry { struct bpf_mprog_fp fp_items[BPF_MPROG_MAX]; struct bpf_mprog_bundle *parent; }; struct bpf_mprog_bundle { struct bpf_mprog_entry a; struct bpf_mprog_entry b; struct bpf_mprog_cp cp_items[BPF_MPROG_MAX]; struct bpf_prog *ref; atomic64_t revision; u32 count; }; struct bpf_tuple { struct bpf_prog *prog; struct bpf_link *link; }; static inline struct bpf_mprog_entry * bpf_mprog_peer(const struct bpf_mprog_entry *entry) { if (entry == &entry->parent->a) return &entry->parent->b; else return &entry->parent->a; } static inline void bpf_mprog_bundle_init(struct bpf_mprog_bundle *bundle) { BUILD_BUG_ON(sizeof(bundle->a.fp_items[0]) > sizeof(u64)); BUILD_BUG_ON(ARRAY_SIZE(bundle->a.fp_items) != ARRAY_SIZE(bundle->cp_items)); memset(bundle, 0, sizeof(*bundle)); atomic64_set(&bundle->revision, 1); bundle->a.parent = bundle; bundle->b.parent = bundle; } static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry) { entry->parent->count++; } static inline void bpf_mprog_dec(struct bpf_mprog_entry *entry) { entry->parent->count--; } static inline int bpf_mprog_max(void) { return ARRAY_SIZE(((struct bpf_mprog_entry *)NULL)->fp_items) - 1; } static inline int bpf_mprog_total(struct bpf_mprog_entry *entry) { int total = entry->parent->count; WARN_ON_ONCE(total > bpf_mprog_max()); return total; } static inline bool bpf_mprog_exists(struct bpf_mprog_entry *entry, struct bpf_prog *prog) { const struct bpf_mprog_fp *fp; const struct bpf_prog *tmp; bpf_mprog_foreach_prog(entry, fp, tmp) { if (tmp == prog) return true; } return false; } static inline void bpf_mprog_mark_for_release(struct bpf_mprog_entry *entry, struct bpf_tuple *tuple) { WARN_ON_ONCE(entry->parent->ref); if (!tuple->link) entry->parent->ref = tuple->prog; } static inline void bpf_mprog_complete_release(struct bpf_mprog_entry *entry) { /* In 
the non-link case prog deletions can only drop the reference * to the prog after the bpf_mprog_entry got swapped and the * bpf_mprog ensured that there are no inflight users anymore. * * Paired with bpf_mprog_mark_for_release(). */ if (entry->parent->ref) { bpf_prog_put(entry->parent->ref); entry->parent->ref = NULL; } } static inline void bpf_mprog_revision_new(struct bpf_mprog_entry *entry) { atomic64_inc(&entry->parent->revision); } static inline void bpf_mprog_commit(struct bpf_mprog_entry *entry) { bpf_mprog_complete_release(entry); bpf_mprog_revision_new(entry); } static inline u64 bpf_mprog_revision(struct bpf_mprog_entry *entry) { return atomic64_read(&entry->parent->revision); } static inline void bpf_mprog_entry_copy(struct bpf_mprog_entry *dst, struct bpf_mprog_entry *src) { memcpy(dst->fp_items, src->fp_items, sizeof(src->fp_items)); } static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst) { memset(dst->fp_items, 0, sizeof(dst->fp_items)); } static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry, struct bpf_mprog_entry **entry_new) { struct bpf_mprog_entry *peer; peer = bpf_mprog_peer(entry); bpf_mprog_entry_clear(peer); peer->parent->count = 0; *entry_new = peer; } static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx) { int total = bpf_mprog_total(entry); memmove(entry->fp_items + idx + 1, entry->fp_items + idx, (total - idx) * sizeof(struct bpf_mprog_fp)); memmove(entry->parent->cp_items + idx + 1, entry->parent->cp_items + idx, (total - idx) * sizeof(struct bpf_mprog_cp)); } static inline void bpf_mprog_entry_shrink(struct bpf_mprog_entry *entry, int idx) { /* Total array size is needed in this case to enure the NULL * entry is copied at the end. */ int total = ARRAY_SIZE(entry->fp_items); memmove(entry->fp_items + idx, entry->fp_items + idx + 1, (total - idx - 1) * sizeof(struct bpf_mprog_fp)); memmove(entry->parent->cp_items + idx, entry->parent->cp_items + idx + 1, (total - idx - 1) * sizeof(struct bpf_mprog_cp)); } static inline void bpf_mprog_read(struct bpf_mprog_entry *entry, u32 idx, struct bpf_mprog_fp **fp, struct bpf_mprog_cp **cp) { *fp = &entry->fp_items[idx]; *cp = &entry->parent->cp_items[idx]; } static inline void bpf_mprog_write(struct bpf_mprog_fp *fp, struct bpf_mprog_cp *cp, struct bpf_tuple *tuple) { WRITE_ONCE(fp->prog, tuple->prog); cp->link = tuple->link; } int bpf_mprog_attach(struct bpf_mprog_entry *entry, struct bpf_mprog_entry **entry_new, struct bpf_prog *prog_new, struct bpf_link *link, struct bpf_prog *prog_old, u32 flags, u32 id_or_fd, u64 revision); int bpf_mprog_detach(struct bpf_mprog_entry *entry, struct bpf_mprog_entry **entry_new, struct bpf_prog *prog, struct bpf_link *link, u32 flags, u32 id_or_fd, u64 revision); int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr, struct bpf_mprog_entry *entry); static inline bool bpf_mprog_supported(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SCHED_CLS: return true; default: return false; } } #endif /* __BPF_MPROG_H */
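/*
 * Illustrative sketch (not part of this header): a hypothetical in-kernel
 * user wiring the "Attach case" pseudo code above to a concrete attach
 * location.  The foo_lock mutex and the foo_entry __rcu pointer are
 * assumptions standing in for what tcx does with RTNL and its per-netdevice
 * pointers; allocation of the backing bpf_mprog_bundle on the very first
 * attach is omitted.
 */
#include <linux/bpf_mprog.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(foo_lock);                  /* bpf_mprog user-side lock */
static struct bpf_mprog_entry __rcu *foo_entry; /* active attach location */

static int foo_prog_attach(struct bpf_prog *prog, u32 flags, u32 id_or_fd,
                           u64 revision)
{
        struct bpf_mprog_entry *entry, *entry_new;
        int ret;

        mutex_lock(&foo_lock);
        entry = rcu_dereference_protected(foo_entry,
                                          lockdep_is_held(&foo_lock));
        ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, NULL,
                               flags, id_or_fd, revision);
        if (!ret) {
                if (entry != entry_new) {
                        /* Swap in the peer entry, wait out old readers. */
                        rcu_assign_pointer(foo_entry, entry_new);
                        synchronize_rcu();
                }
                bpf_mprog_commit(entry);
        }
        mutex_unlock(&foo_lock);
        return ret;
}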
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
*/ #include <linux/completion.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/mutex.h> #include <linux/random.h> #include <linux/rbtree.h> #include <linux/igmp.h> #include <linux/xarray.h> #include <linux/inetdevice.h> #include <linux/slab.h> #include <linux/module.h> #include <net/route.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/netevent.h> #include <net/tcp.h> #include <net/ipv6.h> #include <net/ip_fib.h> #include <net/ip6_route.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> #include <rdma/rdma_netlink.h> #include <rdma/ib.h> #include <rdma/ib_cache.h> #include <rdma/ib_cm.h> #include <rdma/ib_sa.h> #include <rdma/iw_cm.h> #include "core_priv.h" #include "cma_priv.h" #include "cma_trace.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("Generic RDMA CM Agent"); MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_RESPONSE_TIMEOUT 20 #define CMA_MAX_CM_RETRIES 15 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_IBOE_PACKET_LIFETIME 16 #define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP static const char * const cma_events[] = { [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", [RDMA_CM_EVENT_ADDR_ERROR] = "address error", [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", [RDMA_CM_EVENT_REJECTED] = "rejected", [RDMA_CM_EVENT_ESTABLISHED] = "established", [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", }; static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, enum ib_gid_type gid_type); const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { size_t index = event; return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ? cma_events[index] : "unrecognized event"; } EXPORT_SYMBOL(rdma_event_msg); const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, int reason) { if (rdma_ib_or_roce(id->device, id->port_num)) return ibcm_reject_msg(reason); if (rdma_protocol_iwarp(id->device, id->port_num)) return iwcm_reject_msg(reason); WARN_ON_ONCE(1); return "unrecognized transport"; } EXPORT_SYMBOL(rdma_reject_msg); /** * rdma_is_consumer_reject - return true if the consumer rejected the connect * request. * @id: Communication identifier that received the REJECT event. * @reason: Value returned in the REJECT event status field. */ static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) { if (rdma_ib_or_roce(id->device, id->port_num)) return reason == IB_CM_REJ_CONSUMER_DEFINED; if (rdma_protocol_iwarp(id->device, id->port_num)) return reason == -ECONNREFUSED; WARN_ON_ONCE(1); return false; } const void *rdma_consumer_reject_data(struct rdma_cm_id *id, struct rdma_cm_event *ev, u8 *data_len) { const void *p; if (rdma_is_consumer_reject(id, ev->status)) { *data_len = ev->param.conn.private_data_len; p = ev->param.conn.private_data; } else { *data_len = 0; p = NULL; } return p; } EXPORT_SYMBOL(rdma_consumer_reject_data); /** * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id. 
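 *
 * Illustrative usage (a hedged sketch, not taken from this file): a ULP
 * event handler that may be running over iWARP could peek at the
 * underlying iw_cm_id, e.g.:
 *
 *	struct iw_cm_id *iw_id = rdma_iw_cm_id(id);
 *
 *	if (iw_id)
 *		pr_debug("cm_id %p backed by iw_cm_id %p\n", id, iw_id);
 *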
* @id: Communication Identifier */ struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id) { struct rdma_id_private *id_priv; id_priv = container_of(id, struct rdma_id_private, id); if (id->device->node_type == RDMA_NODE_RNIC) return id_priv->cm_id.iw; return NULL; } EXPORT_SYMBOL(rdma_iw_cm_id); /** * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack. * @res: rdma resource tracking entry pointer */ struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res) { struct rdma_id_private *id_priv = container_of(res, struct rdma_id_private, res); return &id_priv->id; } EXPORT_SYMBOL(rdma_res_to_id); static int cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device, void *client_data); static struct ib_client cma_client = { .name = "cma", .add = cma_add_one, .remove = cma_remove_one }; static struct ib_sa_client sa_client; static LIST_HEAD(dev_list); static LIST_HEAD(listen_any_list); static DEFINE_MUTEX(lock); static struct rb_root id_table = RB_ROOT; /* Serialize operations of id_table tree */ static DEFINE_SPINLOCK(id_table_lock); static struct workqueue_struct *cma_wq; static unsigned int cma_pernet_id; struct cma_pernet { struct xarray tcp_ps; struct xarray udp_ps; struct xarray ipoib_ps; struct xarray ib_ps; }; static struct cma_pernet *cma_pernet(struct net *net) { return net_generic(net, cma_pernet_id); } static struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps) { struct cma_pernet *pernet = cma_pernet(net); switch (ps) { case RDMA_PS_TCP: return &pernet->tcp_ps; case RDMA_PS_UDP: return &pernet->udp_ps; case RDMA_PS_IPOIB: return &pernet->ipoib_ps; case RDMA_PS_IB: return &pernet->ib_ps; default: return NULL; } } struct id_table_entry { struct list_head id_list; struct rb_node rb_node; }; struct cma_device { struct list_head list; struct ib_device *device; struct completion comp; refcount_t refcount; struct list_head id_list; enum ib_gid_type *default_gid_type; u8 *default_roce_tos; }; struct rdma_bind_list { enum rdma_ucm_port_space ps; struct hlist_head owners; unsigned short port; }; static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps, struct rdma_bind_list *bind_list, int snum) { struct xarray *xa = cma_pernet_xa(net, ps); return xa_insert(xa, snum, bind_list, GFP_KERNEL); } static struct rdma_bind_list *cma_ps_find(struct net *net, enum rdma_ucm_port_space ps, int snum) { struct xarray *xa = cma_pernet_xa(net, ps); return xa_load(xa, snum); } static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps, int snum) { struct xarray *xa = cma_pernet_xa(net, ps); xa_erase(xa, snum); } enum { CMA_OPTION_AFONLY, }; void cma_dev_get(struct cma_device *cma_dev) { refcount_inc(&cma_dev->refcount); } void cma_dev_put(struct cma_device *cma_dev) { if (refcount_dec_and_test(&cma_dev->refcount)) complete(&cma_dev->comp); } struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, void *cookie) { struct cma_device *cma_dev; struct cma_device *found_cma_dev = NULL; mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) if (filter(cma_dev->device, cookie)) { found_cma_dev = cma_dev; break; } if (found_cma_dev) cma_dev_get(found_cma_dev); mutex_unlock(&lock); return found_cma_dev; } int cma_get_default_gid_type(struct cma_device *cma_dev, u32 port) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; } int cma_set_default_gid_type(struct cma_device *cma_dev, u32 port, enum ib_gid_type 
default_gid_type) { unsigned long supported_gids; if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; if (default_gid_type == IB_GID_TYPE_IB && rdma_protocol_roce_eth_encap(cma_dev->device, port)) default_gid_type = IB_GID_TYPE_ROCE; supported_gids = roce_gid_type_mask_support(cma_dev->device, port); if (!(supported_gids & 1 << default_gid_type)) return -EINVAL; cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = default_gid_type; return 0; } int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; } int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port, u8 default_roce_tos) { if (!rdma_is_port_valid(cma_dev->device, port)) return -EINVAL; cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] = default_roce_tos; return 0; } struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev) { return cma_dev->device; } /* * Device removal can occur at anytime, so we need extra handling to * serialize notifying the user of device removal with other callbacks. * We do this by disabling removal notification while a callback is in process, * and reporting it after the callback completes. */ struct cma_multicast { struct rdma_id_private *id_priv; union { struct ib_sa_multicast *sa_mc; struct { struct work_struct work; struct rdma_cm_event event; } iboe_join; }; struct list_head list; void *context; struct sockaddr_storage addr; u8 join_state; }; struct cma_work { struct work_struct work; struct rdma_id_private *id; enum rdma_cm_state old_state; enum rdma_cm_state new_state; struct rdma_cm_event event; }; union cma_ip_addr { struct in6_addr ip6; struct { __be32 pad[3]; __be32 addr; } ip4; }; struct cma_hdr { u8 cma_version; u8 ip_version; /* IP version: 7:4 */ __be16 port; union cma_ip_addr src_addr; union cma_ip_addr dst_addr; }; #define CMA_VERSION 0x00 struct cma_req_info { struct sockaddr_storage listen_addr_storage; struct sockaddr_storage src_addr_storage; struct ib_device *device; union ib_gid local_gid; __be64 service_id; int port; bool has_gid; u16 pkey; }; static int cma_comp_exch(struct rdma_id_private *id_priv, enum rdma_cm_state comp, enum rdma_cm_state exch) { unsigned long flags; int ret; /* * The FSM uses a funny double locking where state is protected by both * the handler_mutex and the spinlock. State is not allowed to change * to/from a handler_mutex protected value without also holding * handler_mutex. 
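 *
 * A condensed, illustrative sketch of the calling pattern (mirroring the
 * DREQ path in cma_ib_handler() further below):
 *
 *	mutex_lock(&id_priv->handler_mutex);
 *	...
 *	if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_DISCONNECT))
 *		goto out;	- state already moved on elsewhere
 *	...
 *	mutex_unlock(&id_priv->handler_mutex);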
*/ if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT) lockdep_assert_held(&id_priv->handler_mutex); spin_lock_irqsave(&id_priv->lock, flags); if ((ret = (id_priv->state == comp))) id_priv->state = exch; spin_unlock_irqrestore(&id_priv->lock, flags); return ret; } static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr) { return hdr->ip_version >> 4; } static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) { hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); } static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) { return (struct sockaddr *)&id_priv->id.route.addr.src_addr; } static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) { return (struct sockaddr *)&id_priv->id.route.addr.dst_addr; } static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) { struct in_device *in_dev = NULL; if (ndev) { rtnl_lock(); in_dev = __in_dev_get_rtnl(ndev); if (in_dev) { if (join) ip_mc_inc_group(in_dev, *(__be32 *)(mgid->raw + 12)); else ip_mc_dec_group(in_dev, *(__be32 *)(mgid->raw + 12)); } rtnl_unlock(); } return (in_dev) ? 0 : -ENODEV; } static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa, struct id_table_entry *entry_b) { struct rdma_id_private *id_priv = list_first_entry( &entry_b->id_list, struct rdma_id_private, id_list_entry); int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if; struct sockaddr *sb = cma_dst_addr(id_priv); if (ifindex_a != ifindex_b) return (ifindex_a > ifindex_b) ? 1 : -1; if (sa->sa_family != sb->sa_family) return sa->sa_family - sb->sa_family; if (sa->sa_family == AF_INET && __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) { return memcmp(&((struct sockaddr_in *)sa)->sin_addr, &((struct sockaddr_in *)sb)->sin_addr, sizeof(((struct sockaddr_in *)sa)->sin_addr)); } if (sa->sa_family == AF_INET6 && __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) { return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr, &((struct sockaddr_in6 *)sb)->sin6_addr); } return -1; } static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv) { struct rb_node **new, *parent = NULL; struct id_table_entry *this, *node; unsigned long flags; int result; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; spin_lock_irqsave(&id_table_lock, flags); new = &id_table.rb_node; while (*new) { this = container_of(*new, struct id_table_entry, rb_node); result = compare_netdev_and_ip( node_id_priv->id.route.addr.dev_addr.bound_dev_if, cma_dst_addr(node_id_priv), this); parent = *new; if (result < 0) new = &((*new)->rb_left); else if (result > 0) new = &((*new)->rb_right); else { list_add_tail(&node_id_priv->id_list_entry, &this->id_list); kfree(node); goto unlock; } } INIT_LIST_HEAD(&node->id_list); list_add_tail(&node_id_priv->id_list_entry, &node->id_list); rb_link_node(&node->rb_node, parent, new); rb_insert_color(&node->rb_node, &id_table); unlock: spin_unlock_irqrestore(&id_table_lock, flags); return 0; } static struct id_table_entry * node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa) { struct rb_node *node = root->rb_node; struct id_table_entry *data; int result; while (node) { data = container_of(node, struct id_table_entry, rb_node); result = compare_netdev_and_ip(ifindex, sa, data); if (result < 0) node = node->rb_left; else if (result > 0) node = node->rb_right; else return data; } return NULL; } static void cma_remove_id_from_tree(struct rdma_id_private *id_priv) { struct id_table_entry *data; unsigned long flags; 
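	/*
	 * Removal sketch: look up the rb-tree node keyed by (bound ifindex,
	 * destination address), unlink this id from the node's list and, if
	 * that list is now empty, erase and free the node itself.
	 */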
spin_lock_irqsave(&id_table_lock, flags); if (list_empty(&id_priv->id_list_entry)) goto out; data = node_from_ndev_ip(&id_table, id_priv->id.route.addr.dev_addr.bound_dev_if, cma_dst_addr(id_priv)); if (!data) goto out; list_del_init(&id_priv->id_list_entry); if (list_empty(&data->id_list)) { rb_erase(&data->rb_node, &id_table); kfree(data); } out: spin_unlock_irqrestore(&id_table_lock, flags); } static void _cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { cma_dev_get(cma_dev); id_priv->cma_dev = cma_dev; id_priv->id.device = cma_dev->device; id_priv->id.route.addr.dev_addr.transport = rdma_node_get_transport(cma_dev->device->node_type); list_add_tail(&id_priv->device_item, &cma_dev->id_list); trace_cm_id_attach(id_priv, cma_dev->device); } static void cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { _cma_attach_to_dev(id_priv, cma_dev); id_priv->gid_type = cma_dev->default_gid_type[id_priv->id.port_num - rdma_start_port(cma_dev->device)]; } static void cma_release_dev(struct rdma_id_private *id_priv) { mutex_lock(&lock); list_del_init(&id_priv->device_item); cma_dev_put(id_priv->cma_dev); id_priv->cma_dev = NULL; id_priv->id.device = NULL; if (id_priv->id.route.addr.dev_addr.sgid_attr) { rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); id_priv->id.route.addr.dev_addr.sgid_attr = NULL; } mutex_unlock(&lock); } static inline unsigned short cma_family(struct rdma_id_private *id_priv) { return id_priv->id.route.addr.src_addr.ss_family; } static int cma_set_default_qkey(struct rdma_id_private *id_priv) { struct ib_sa_mcmember_rec rec; int ret = 0; switch (id_priv->id.ps) { case RDMA_PS_UDP: case RDMA_PS_IB: id_priv->qkey = RDMA_UDP_QKEY; break; case RDMA_PS_IPOIB: ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, &rec.mgid, &rec); if (!ret) id_priv->qkey = be32_to_cpu(rec.qkey); break; default: break; } return ret; } static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) { if (!qkey || (id_priv->qkey && (id_priv->qkey != qkey))) return -EINVAL; id_priv->qkey = qkey; return 0; } static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) { dev_addr->dev_type = ARPHRD_INFINIBAND; rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); } static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) { int ret; if (addr->sa_family != AF_IB) { ret = rdma_translate_ip(addr, dev_addr); } else { cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); ret = 0; } return ret; } static const struct ib_gid_attr * cma_validate_port(struct ib_device *device, u32 port, enum ib_gid_type gid_type, union ib_gid *gid, struct rdma_id_private *id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV); int bound_if_index = dev_addr->bound_dev_if; int dev_type = dev_addr->dev_type; struct net_device *ndev = NULL; struct net_device *pdev = NULL; if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net)) goto out; if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) goto out; if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) goto out; /* * For drivers that do not associate more than one net device with * their gid tables, such as iWARP drivers, it is sufficient to * return the first table entry. 
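 * The checks below then only need to reconcile that single netdevice (and
 * any VLAN upper/lower device) against the id's bound interface index and
 * network namespace.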
* * Other driver classes might be included in the future. */ if (rdma_protocol_iwarp(device, port)) { sgid_attr = rdma_get_gid_attr(device, port, 0); if (IS_ERR(sgid_attr)) goto out; rcu_read_lock(); ndev = rcu_dereference(sgid_attr->ndev); if (ndev->ifindex != bound_if_index) { pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index); if (pdev) { if (is_vlan_dev(pdev)) { pdev = vlan_dev_real_dev(pdev); if (ndev->ifindex == pdev->ifindex) bound_if_index = pdev->ifindex; } if (is_vlan_dev(ndev)) { pdev = vlan_dev_real_dev(ndev); if (bound_if_index == pdev->ifindex) bound_if_index = ndev->ifindex; } } } if (!net_eq(dev_net(ndev), dev_addr->net) || ndev->ifindex != bound_if_index) { rdma_put_gid_attr(sgid_attr); sgid_attr = ERR_PTR(-ENODEV); } rcu_read_unlock(); goto out; } if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { ndev = dev_get_by_index(dev_addr->net, bound_if_index); if (!ndev) goto out; } else { gid_type = IB_GID_TYPE_IB; } sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev); dev_put(ndev); out: return sgid_attr; } static void cma_bind_sgid_attr(struct rdma_id_private *id_priv, const struct ib_gid_attr *sgid_attr) { WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; } /** * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute * based on source ip address. * @id_priv: cm_id which should be bound to cma device * * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute * based on source IP address. It returns 0 on success or error code otherwise. * It is applicable to active and passive side cm_id. */ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; const struct ib_gid_attr *sgid_attr; union ib_gid gid, iboe_gid, *gidp; struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; u32 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &iboe_gid); memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof(gid)); mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) { rdma_for_each_port (cma_dev->device, port) { gidp = rdma_protocol_roce(cma_dev->device, port) ? &iboe_gid : &gid; gid_type = cma_dev->default_gid_type[port - 1]; sgid_attr = cma_validate_port(cma_dev->device, port, gid_type, gidp, id_priv); if (!IS_ERR(sgid_attr)) { id_priv->id.port_num = port; cma_bind_sgid_attr(id_priv, sgid_attr); cma_attach_to_dev(id_priv, cma_dev); ret = 0; goto out; } } } out: mutex_unlock(&lock); return ret; } /** * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute * @id_priv: cm id to bind to cma device * @listen_id_priv: listener cm id to match against * @req: Pointer to req structure containaining incoming * request information * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when * rdma device matches for listen_id and incoming request. It also verifies * that a GID table entry is present for the source address. * Returns 0 on success, or returns error code otherwise. 
*/ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv, const struct rdma_id_private *listen_id_priv, struct cma_req_info *req) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; const struct ib_gid_attr *sgid_attr; enum ib_gid_type gid_type; union ib_gid gid; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; if (rdma_protocol_roce(req->device, req->port)) rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &gid); else memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof(gid)); gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1]; sgid_attr = cma_validate_port(req->device, req->port, gid_type, &gid, id_priv); if (IS_ERR(sgid_attr)) return PTR_ERR(sgid_attr); id_priv->id.port_num = req->port; cma_bind_sgid_attr(id_priv, sgid_attr); /* Need to acquire lock to protect against reader * of cma_dev->id_list such as cma_netdev_callback() and * cma_process_remove(). */ mutex_lock(&lock); cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); mutex_unlock(&lock); rdma_restrack_add(&id_priv->res); return 0; } static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, const struct rdma_id_private *listen_id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; const struct ib_gid_attr *sgid_attr; struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; union ib_gid gid; u32 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof(gid)); mutex_lock(&lock); cma_dev = listen_id_priv->cma_dev; port = listen_id_priv->id.port_num; gid_type = listen_id_priv->gid_type; sgid_attr = cma_validate_port(cma_dev->device, port, gid_type, &gid, id_priv); if (!IS_ERR(sgid_attr)) { id_priv->id.port_num = port; cma_bind_sgid_attr(id_priv, sgid_attr); ret = 0; goto out; } list_for_each_entry(cma_dev, &dev_list, list) { rdma_for_each_port (cma_dev->device, port) { if (listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; gid_type = cma_dev->default_gid_type[port - 1]; sgid_attr = cma_validate_port(cma_dev->device, port, gid_type, &gid, id_priv); if (!IS_ERR(sgid_attr)) { id_priv->id.port_num = port; cma_bind_sgid_attr(id_priv, sgid_attr); ret = 0; goto out; } } } out: if (!ret) { cma_attach_to_dev(id_priv, cma_dev); rdma_restrack_add(&id_priv->res); } mutex_unlock(&lock); return ret; } /* * Select the source IB device and address to reach the destination IB address. 
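 * The walk below scans every cached AF_IB-capable port that carries the
 * destination P_Key: a GID table entry equal to the destination GID is
 * taken immediately, otherwise the first entry on an ACTIVE port within
 * the same subnet prefix is accepted.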
*/ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) { struct cma_device *cma_dev, *cur_dev; struct sockaddr_ib *addr; union ib_gid gid, sgid, *dgid; unsigned int p; u16 pkey, index; enum ib_port_state port_state; int ret; int i; cma_dev = NULL; addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); dgid = (union ib_gid *) &addr->sib_addr; pkey = ntohs(addr->sib_pkey); mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { rdma_for_each_port (cur_dev->device, p) { if (!rdma_cap_af_ib(cur_dev->device, p)) continue; if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) continue; if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) continue; for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; ++i) { ret = rdma_query_gid(cur_dev->device, p, i, &gid); if (ret) continue; if (!memcmp(&gid, dgid, sizeof(gid))) { cma_dev = cur_dev; sgid = gid; id_priv->id.port_num = p; goto found; } if (!cma_dev && (gid.global.subnet_prefix == dgid->global.subnet_prefix) && port_state == IB_PORT_ACTIVE) { cma_dev = cur_dev; sgid = gid; id_priv->id.port_num = p; goto found; } } } } mutex_unlock(&lock); return -ENODEV; found: cma_attach_to_dev(id_priv, cma_dev); rdma_restrack_add(&id_priv->res); mutex_unlock(&lock); addr = (struct sockaddr_ib *)cma_src_addr(id_priv); memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); return 0; } static void cma_id_get(struct rdma_id_private *id_priv) { refcount_inc(&id_priv->refcount); } static void cma_id_put(struct rdma_id_private *id_priv) { if (refcount_dec_and_test(&id_priv->refcount)) complete(&id_priv->comp); } static struct rdma_id_private * __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, void *context, enum rdma_ucm_port_space ps, enum ib_qp_type qp_type, const struct rdma_id_private *parent) { struct rdma_id_private *id_priv; id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); if (!id_priv) return ERR_PTR(-ENOMEM); id_priv->state = RDMA_CM_IDLE; id_priv->id.context = context; id_priv->id.event_handler = event_handler; id_priv->id.ps = ps; id_priv->id.qp_type = qp_type; id_priv->tos_set = false; id_priv->timeout_set = false; id_priv->min_rnr_timer_set = false; id_priv->gid_type = IB_GID_TYPE_IB; spin_lock_init(&id_priv->lock); mutex_init(&id_priv->qp_mutex); init_completion(&id_priv->comp); refcount_set(&id_priv->refcount, 1); mutex_init(&id_priv->handler_mutex); INIT_LIST_HEAD(&id_priv->device_item); INIT_LIST_HEAD(&id_priv->id_list_entry); INIT_LIST_HEAD(&id_priv->listen_list); INIT_LIST_HEAD(&id_priv->mc_list); get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); id_priv->id.route.addr.dev_addr.net = get_net(net); id_priv->seq_num &= 0x00ffffff; rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); if (parent) rdma_restrack_parent_name(&id_priv->res, &parent->res); return id_priv; } struct rdma_cm_id * __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, void *context, enum rdma_ucm_port_space ps, enum ib_qp_type qp_type, const char *caller) { struct rdma_id_private *ret; ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL); if (IS_ERR(ret)) return ERR_CAST(ret); rdma_restrack_set_name(&ret->res, caller); return &ret->id; } EXPORT_SYMBOL(__rdma_create_kernel_id); struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, void *context, enum rdma_ucm_port_space ps, enum ib_qp_type qp_type) { struct rdma_id_private *ret; ret = __rdma_create_id(current->nsproxy->net_ns, 
event_handler, context, ps, qp_type, NULL); if (IS_ERR(ret)) return ERR_CAST(ret); rdma_restrack_set_name(&ret->res, NULL); return &ret->id; } EXPORT_SYMBOL(rdma_create_user_id); static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; qp_attr.qp_state = IB_QPS_INIT; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) return ret; ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); if (ret) return ret; qp_attr.qp_state = IB_QPS_RTR; ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); if (ret) return ret; qp_attr.qp_state = IB_QPS_RTS; qp_attr.sq_psn = 0; ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); return ret; } static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; qp_attr.qp_state = IB_QPS_INIT; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) return ret; return ib_modify_qp(qp, &qp_attr, qp_attr_mask); } int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr) { struct rdma_id_private *id_priv; struct ib_qp *qp; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (id->device != pd->device) { ret = -EINVAL; goto out_err; } qp_init_attr->port_num = id->port_num; qp = ib_create_qp(pd, qp_init_attr); if (IS_ERR(qp)) { ret = PTR_ERR(qp); goto out_err; } if (id->qp_type == IB_QPT_UD) ret = cma_init_ud_qp(id_priv, qp); else ret = cma_init_conn_qp(id_priv, qp); if (ret) goto out_destroy; id->qp = qp; id_priv->qp_num = qp->qp_num; id_priv->srq = (qp->srq != NULL); trace_cm_qp_create(id_priv, pd, qp_init_attr, 0); return 0; out_destroy: ib_destroy_qp(qp); out_err: trace_cm_qp_create(id_priv, pd, qp_init_attr, ret); return ret; } EXPORT_SYMBOL(rdma_create_qp); void rdma_destroy_qp(struct rdma_cm_id *id) { struct rdma_id_private *id_priv; id_priv = container_of(id, struct rdma_id_private, id); trace_cm_qp_destroy(id_priv); mutex_lock(&id_priv->qp_mutex); ib_destroy_qp(id_priv->id.qp); id_priv->id.qp = NULL; mutex_unlock(&id_priv->qp_mutex); } EXPORT_SYMBOL(rdma_destroy_qp); static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { ret = 0; goto out; } /* Need to update QP attributes from default values. 
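 * The QP is stepped INIT -> RTR with attributes filled in by
 * rdma_init_qp_attr(); when @conn_param is supplied, its
 * responder_resources is applied as max_dest_rd_atomic.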
*/ qp_attr.qp_state = IB_QPS_INIT; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) goto out; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); if (ret) goto out; qp_attr.qp_state = IB_QPS_RTR; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) goto out; BUG_ON(id_priv->cma_dev->device != id_priv->id.device); if (conn_param) qp_attr.max_dest_rd_atomic = conn_param->responder_resources; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); out: mutex_unlock(&id_priv->qp_mutex); return ret; } static int cma_modify_qp_rts(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { ret = 0; goto out; } qp_attr.qp_state = IB_QPS_RTS; ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); if (ret) goto out; if (conn_param) qp_attr.max_rd_atomic = conn_param->initiator_depth; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); out: mutex_unlock(&id_priv->qp_mutex); return ret; } static int cma_modify_qp_err(struct rdma_id_private *id_priv) { struct ib_qp_attr qp_attr; int ret; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { ret = 0; goto out; } qp_attr.qp_state = IB_QPS_ERR; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); out: mutex_unlock(&id_priv->qp_mutex); return ret; } static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; int ret; u16 pkey; if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) pkey = 0xffff; else pkey = ib_addr_get_pkey(dev_addr); ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, pkey, &qp_attr->pkey_index); if (ret) return ret; qp_attr->port_num = id_priv->id.port_num; *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; if (id_priv->id.qp_type == IB_QPT_UD) { ret = cma_set_default_qkey(id_priv); if (ret) return ret; qp_attr->qkey = id_priv->qkey; *qp_attr_mask |= IB_QP_QKEY; } else { qp_attr->qp_access_flags = 0; *qp_attr_mask |= IB_QP_ACCESS_FLAGS; } return 0; } int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct rdma_id_private *id_priv; int ret = 0; id_priv = container_of(id, struct rdma_id_private, id); if (rdma_cap_ib_cm(id->device, id->port_num)) { if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); else ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, qp_attr_mask); if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; } else if (rdma_cap_iw_cm(id->device, id->port_num)) { if (!id_priv->cm_id.iw) { qp_attr->qp_access_flags = 0; *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; } else ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, qp_attr_mask); qp_attr->port_num = id_priv->id.port_num; *qp_attr_mask |= IB_QP_PORT; } else { ret = -ENOSYS; } if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) qp_attr->timeout = id_priv->timeout; if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) qp_attr->min_rnr_timer = id_priv->min_rnr_timer; return ret; } EXPORT_SYMBOL(rdma_init_qp_attr); static inline bool cma_zero_addr(const struct sockaddr *addr) { switch (addr->sa_family) { case AF_INET: return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); case AF_INET6: return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr); 
case AF_IB: return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr); default: return false; } } static inline bool cma_loopback_addr(const struct sockaddr *addr) { switch (addr->sa_family) { case AF_INET: return ipv4_is_loopback( ((struct sockaddr_in *)addr)->sin_addr.s_addr); case AF_INET6: return ipv6_addr_loopback( &((struct sockaddr_in6 *)addr)->sin6_addr); case AF_IB: return ib_addr_loopback( &((struct sockaddr_ib *)addr)->sib_addr); default: return false; } } static inline bool cma_any_addr(const struct sockaddr *addr) { return cma_zero_addr(addr) || cma_loopback_addr(addr); } static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst) { if (src->sa_family != dst->sa_family) return -1; switch (src->sa_family) { case AF_INET: return ((struct sockaddr_in *)src)->sin_addr.s_addr != ((struct sockaddr_in *)dst)->sin_addr.s_addr; case AF_INET6: { struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src; struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst; bool link_local; if (ipv6_addr_cmp(&src_addr6->sin6_addr, &dst_addr6->sin6_addr)) return 1; link_local = ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL; /* Link local must match their scope_ids */ return link_local ? (src_addr6->sin6_scope_id != dst_addr6->sin6_scope_id) : 0; } default: return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, &((struct sockaddr_ib *) dst)->sib_addr); } } static __be16 cma_port(const struct sockaddr *addr) { struct sockaddr_ib *sib; switch (addr->sa_family) { case AF_INET: return ((struct sockaddr_in *) addr)->sin_port; case AF_INET6: return ((struct sockaddr_in6 *) addr)->sin6_port; case AF_IB: sib = (struct sockaddr_ib *) addr; return htons((u16) (be64_to_cpu(sib->sib_sid) & be64_to_cpu(sib->sib_sid_mask))); default: return 0; } } static inline int cma_any_port(const struct sockaddr *addr) { return !cma_port(addr); } static void cma_save_ib_info(struct sockaddr *src_addr, struct sockaddr *dst_addr, const struct rdma_cm_id *listen_id, const struct sa_path_rec *path) { struct sockaddr_ib *listen_ib, *ib; listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; if (src_addr) { ib = (struct sockaddr_ib *)src_addr; ib->sib_family = AF_IB; if (path) { ib->sib_pkey = path->pkey; ib->sib_flowinfo = path->flow_label; memcpy(&ib->sib_addr, &path->sgid, 16); ib->sib_sid = path->service_id; ib->sib_scope_id = 0; } else { ib->sib_pkey = listen_ib->sib_pkey; ib->sib_flowinfo = listen_ib->sib_flowinfo; ib->sib_addr = listen_ib->sib_addr; ib->sib_sid = listen_ib->sib_sid; ib->sib_scope_id = listen_ib->sib_scope_id; } ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); } if (dst_addr) { ib = (struct sockaddr_ib *)dst_addr; ib->sib_family = AF_IB; if (path) { ib->sib_pkey = path->pkey; ib->sib_flowinfo = path->flow_label; memcpy(&ib->sib_addr, &path->dgid, 16); } } } static void cma_save_ip4_info(struct sockaddr_in *src_addr, struct sockaddr_in *dst_addr, struct cma_hdr *hdr, __be16 local_port) { if (src_addr) { *src_addr = (struct sockaddr_in) { .sin_family = AF_INET, .sin_addr.s_addr = hdr->dst_addr.ip4.addr, .sin_port = local_port, }; } if (dst_addr) { *dst_addr = (struct sockaddr_in) { .sin_family = AF_INET, .sin_addr.s_addr = hdr->src_addr.ip4.addr, .sin_port = hdr->port, }; } } static void cma_save_ip6_info(struct sockaddr_in6 *src_addr, struct sockaddr_in6 *dst_addr, struct cma_hdr *hdr, __be16 local_port) { if (src_addr) { *src_addr = (struct sockaddr_in6) { .sin6_family = AF_INET6, .sin6_addr = hdr->dst_addr.ip6, .sin6_port = local_port, }; } if 
(dst_addr) { *dst_addr = (struct sockaddr_in6) { .sin6_family = AF_INET6, .sin6_addr = hdr->src_addr.ip6, .sin6_port = hdr->port, }; } } static u16 cma_port_from_service_id(__be64 service_id) { return (u16)be64_to_cpu(service_id); } static int cma_save_ip_info(struct sockaddr *src_addr, struct sockaddr *dst_addr, const struct ib_cm_event *ib_event, __be64 service_id) { struct cma_hdr *hdr; __be16 port; hdr = ib_event->private_data; if (hdr->cma_version != CMA_VERSION) return -EINVAL; port = htons(cma_port_from_service_id(service_id)); switch (cma_get_ip_ver(hdr)) { case 4: cma_save_ip4_info((struct sockaddr_in *)src_addr, (struct sockaddr_in *)dst_addr, hdr, port); break; case 6: cma_save_ip6_info((struct sockaddr_in6 *)src_addr, (struct sockaddr_in6 *)dst_addr, hdr, port); break; default: return -EAFNOSUPPORT; } return 0; } static int cma_save_net_info(struct sockaddr *src_addr, struct sockaddr *dst_addr, const struct rdma_cm_id *listen_id, const struct ib_cm_event *ib_event, sa_family_t sa_family, __be64 service_id) { if (sa_family == AF_IB) { if (ib_event->event == IB_CM_REQ_RECEIVED) cma_save_ib_info(src_addr, dst_addr, listen_id, ib_event->param.req_rcvd.primary_path); else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) cma_save_ib_info(src_addr, dst_addr, listen_id, NULL); return 0; } return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id); } static int cma_save_req_info(const struct ib_cm_event *ib_event, struct cma_req_info *req) { const struct ib_cm_req_event_param *req_param = &ib_event->param.req_rcvd; const struct ib_cm_sidr_req_event_param *sidr_param = &ib_event->param.sidr_req_rcvd; switch (ib_event->event) { case IB_CM_REQ_RECEIVED: req->device = req_param->listen_id->device; req->port = req_param->port; memcpy(&req->local_gid, &req_param->primary_path->sgid, sizeof(req->local_gid)); req->has_gid = true; req->service_id = req_param->primary_path->service_id; req->pkey = be16_to_cpu(req_param->primary_path->pkey); if (req->pkey != req_param->bth_pkey) pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" "RDMA CMA: in the future this may cause the request to be dropped\n", req_param->bth_pkey, req->pkey); break; case IB_CM_SIDR_REQ_RECEIVED: req->device = sidr_param->listen_id->device; req->port = sidr_param->port; req->has_gid = false; req->service_id = sidr_param->service_id; req->pkey = sidr_param->pkey; if (req->pkey != sidr_param->bth_pkey) pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n" "RDMA CMA: in the future this may cause the request to be dropped\n", sidr_param->bth_pkey, req->pkey); break; default: return -EINVAL; } return 0; } static bool validate_ipv4_net_dev(struct net_device *net_dev, const struct sockaddr_in *dst_addr, const struct sockaddr_in *src_addr) { __be32 daddr = dst_addr->sin_addr.s_addr, saddr = src_addr->sin_addr.s_addr; struct fib_result res; struct flowi4 fl4; int err; bool ret; if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) || ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) || ipv4_is_loopback(saddr)) return false; memset(&fl4, 0, sizeof(fl4)); fl4.flowi4_oif = net_dev->ifindex; fl4.daddr = daddr; fl4.saddr = saddr; rcu_read_lock(); err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); ret = err == 0 && FIB_RES_DEV(res) == net_dev; rcu_read_unlock(); return ret; } static bool validate_ipv6_net_dev(struct net_device *net_dev, const struct sockaddr_in6 *dst_addr, const struct 
sockaddr_in6 *src_addr) { #if IS_ENABLED(CONFIG_IPV6) const int strict = ipv6_addr_type(&dst_addr->sin6_addr) & IPV6_ADDR_LINKLOCAL; struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr, &src_addr->sin6_addr, net_dev->ifindex, NULL, strict); bool ret; if (!rt) return false; ret = rt->rt6i_idev->dev == net_dev; ip6_rt_put(rt); return ret; #else return false; #endif } static bool validate_net_dev(struct net_device *net_dev, const struct sockaddr *daddr, const struct sockaddr *saddr) { const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr; const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr; const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr; switch (daddr->sa_family) { case AF_INET: return saddr->sa_family == AF_INET && validate_ipv4_net_dev(net_dev, daddr4, saddr4); case AF_INET6: return saddr->sa_family == AF_INET6 && validate_ipv6_net_dev(net_dev, daddr6, saddr6); default: return false; } } static struct net_device * roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event) { const struct ib_gid_attr *sgid_attr = NULL; struct net_device *ndev; if (ib_event->event == IB_CM_REQ_RECEIVED) sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr; else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr; if (!sgid_attr) return NULL; rcu_read_lock(); ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr); if (IS_ERR(ndev)) ndev = NULL; else dev_hold(ndev); rcu_read_unlock(); return ndev; } static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event, struct cma_req_info *req) { struct sockaddr *listen_addr = (struct sockaddr *)&req->listen_addr_storage; struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; struct net_device *net_dev; const union ib_gid *gid = req->has_gid ? 
&req->local_gid : NULL; int err; err = cma_save_ip_info(listen_addr, src_addr, ib_event, req->service_id); if (err) return ERR_PTR(err); if (rdma_protocol_roce(req->device, req->port)) net_dev = roce_get_net_dev_by_cm_event(ib_event); else net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey, gid, listen_addr); if (!net_dev) return ERR_PTR(-ENODEV); return net_dev; } static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id) { return (be64_to_cpu(service_id) >> 16) & 0xffff; } static bool cma_match_private_data(struct rdma_id_private *id_priv, const struct cma_hdr *hdr) { struct sockaddr *addr = cma_src_addr(id_priv); __be32 ip4_addr; struct in6_addr ip6_addr; if (cma_any_addr(addr) && !id_priv->afonly) return true; switch (addr->sa_family) { case AF_INET: ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr; if (cma_get_ip_ver(hdr) != 4) return false; if (!cma_any_addr(addr) && hdr->dst_addr.ip4.addr != ip4_addr) return false; break; case AF_INET6: ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr; if (cma_get_ip_ver(hdr) != 6) return false; if (!cma_any_addr(addr) && memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr))) return false; break; case AF_IB: return true; default: return false; } return true; } static bool cma_protocol_roce(const struct rdma_cm_id *id) { struct ib_device *device = id->device; const u32 port_num = id->port_num ?: rdma_start_port(device); return rdma_protocol_roce(device, port_num); } static bool cma_is_req_ipv6_ll(const struct cma_req_info *req) { const struct sockaddr *daddr = (const struct sockaddr *)&req->listen_addr_storage; const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr; /* Returns true if the req is for IPv6 link local */ return (daddr->sa_family == AF_INET6 && (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)); } static bool cma_match_net_dev(const struct rdma_cm_id *id, const struct net_device *net_dev, const struct cma_req_info *req) { const struct rdma_addr *addr = &id->route.addr; if (!net_dev) /* This request is an AF_IB request */ return (!id->port_num || id->port_num == req->port) && (addr->src_addr.ss_family == AF_IB); /* * If the request is not for IPv6 link local, allow matching * request to any netdevice of the one or multiport rdma device. */ if (!cma_is_req_ipv6_ll(req)) return true; /* * Net namespaces must match, and if the listner is listening * on a specific netdevice than netdevice must match as well. 
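 * Concretely, the expression below accepts the request when the network
 * namespaces are equal and either the listener is not bound to any
 * interface, or it is bound to exactly the interface the request arrived
 * on.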
*/ if (net_eq(dev_net(net_dev), addr->dev_addr.net) && (!!addr->dev_addr.bound_dev_if == (addr->dev_addr.bound_dev_if == net_dev->ifindex))) return true; else return false; } static struct rdma_id_private *cma_find_listener( const struct rdma_bind_list *bind_list, const struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event, const struct cma_req_info *req, const struct net_device *net_dev) { struct rdma_id_private *id_priv, *id_priv_dev; lockdep_assert_held(&lock); if (!bind_list) return ERR_PTR(-EINVAL); hlist_for_each_entry(id_priv, &bind_list->owners, node) { if (cma_match_private_data(id_priv, ib_event->private_data)) { if (id_priv->id.device == cm_id->device && cma_match_net_dev(&id_priv->id, net_dev, req)) return id_priv; list_for_each_entry(id_priv_dev, &id_priv->listen_list, listen_item) { if (id_priv_dev->id.device == cm_id->device && cma_match_net_dev(&id_priv_dev->id, net_dev, req)) return id_priv_dev; } } } return ERR_PTR(-EINVAL); } static struct rdma_id_private * cma_ib_id_from_event(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event, struct cma_req_info *req, struct net_device **net_dev) { struct rdma_bind_list *bind_list; struct rdma_id_private *id_priv; int err; err = cma_save_req_info(ib_event, req); if (err) return ERR_PTR(err); *net_dev = cma_get_net_dev(ib_event, req); if (IS_ERR(*net_dev)) { if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { /* Assuming the protocol is AF_IB */ *net_dev = NULL; } else { return ERR_CAST(*net_dev); } } mutex_lock(&lock); /* * Net namespace might be getting deleted while route lookup, * cm_id lookup is in progress. Therefore, perform netdevice * validation, cm_id lookup under rcu lock. * RCU lock along with netdevice state check, synchronizes with * netdevice migrating to different net namespace and also avoids * case where net namespace doesn't get deleted while lookup is in * progress. * If the device state is not IFF_UP, its properties such as ifindex * and nd_net cannot be trusted to remain valid without rcu lock. * net/core/dev.c change_net_namespace() ensures to synchronize with * ongoing operations on net device after device is closed using * synchronize_net(). */ rcu_read_lock(); if (*net_dev) { /* * If netdevice is down, it is likely that it is administratively * down or it might be migrating to different namespace. * In that case avoid further processing, as the net namespace * or ifindex may change. */ if (((*net_dev)->flags & IFF_UP) == 0) { id_priv = ERR_PTR(-EHOSTUNREACH); goto err; } if (!validate_net_dev(*net_dev, (struct sockaddr *)&req->src_addr_storage, (struct sockaddr *)&req->listen_addr_storage)) { id_priv = ERR_PTR(-EHOSTUNREACH); goto err; } } bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net, rdma_ps_from_service_id(req->service_id), cma_port_from_service_id(req->service_id)); id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); err: rcu_read_unlock(); mutex_unlock(&lock); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; } return id_priv; } static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) { return cma_family(id_priv) == AF_IB ? 
0 : sizeof(struct cma_hdr); } static void cma_cancel_route(struct rdma_id_private *id_priv) { if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { if (id_priv->query) ib_sa_cancel_query(id_priv->query_id, id_priv->query); } } static void _cma_cancel_listens(struct rdma_id_private *id_priv) { struct rdma_id_private *dev_id_priv; lockdep_assert_held(&lock); /* * Remove from listen_any_list to prevent added devices from spawning * additional listen requests. */ list_del_init(&id_priv->listen_any_item); while (!list_empty(&id_priv->listen_list)) { dev_id_priv = list_first_entry(&id_priv->listen_list, struct rdma_id_private, listen_item); /* sync with device removal to avoid duplicate destruction */ list_del_init(&dev_id_priv->device_item); list_del_init(&dev_id_priv->listen_item); mutex_unlock(&lock); rdma_destroy_id(&dev_id_priv->id); mutex_lock(&lock); } } static void cma_cancel_listens(struct rdma_id_private *id_priv) { mutex_lock(&lock); _cma_cancel_listens(id_priv); mutex_unlock(&lock); } static void cma_cancel_operation(struct rdma_id_private *id_priv, enum rdma_cm_state state) { switch (state) { case RDMA_CM_ADDR_QUERY: /* * We can avoid doing the rdma_addr_cancel() based on state, * only RDMA_CM_ADDR_QUERY has a work that could still execute. * Notice that the addr_handler work could still be exiting * outside this state, however due to the interaction with the * handler_mutex the work is guaranteed not to touch id_priv * during exit. */ rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); break; case RDMA_CM_ROUTE_QUERY: cma_cancel_route(id_priv); break; case RDMA_CM_LISTEN: if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) cma_cancel_listens(id_priv); break; default: break; } } static void cma_release_port(struct rdma_id_private *id_priv) { struct rdma_bind_list *bind_list = id_priv->bind_list; struct net *net = id_priv->id.route.addr.dev_addr.net; if (!bind_list) return; mutex_lock(&lock); hlist_del(&id_priv->node); if (hlist_empty(&bind_list->owners)) { cma_ps_remove(net, bind_list->ps, bind_list->port); kfree(bind_list); } mutex_unlock(&lock); } static void destroy_mc(struct rdma_id_private *id_priv, struct cma_multicast *mc) { bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) ib_sa_free_multicast(mc->sa_mc); if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct net_device *ndev = NULL; if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); if (ndev && !send_only) { enum ib_gid_type gid_type; union ib_gid mgid; gid_type = id_priv->cma_dev->default_gid_type [id_priv->id.port_num - rdma_start_port( id_priv->cma_dev->device)]; cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, gid_type); cma_igmp_send(ndev, &mgid, false); } dev_put(ndev); cancel_work_sync(&mc->iboe_join.work); } kfree(mc); } static void cma_leave_mc_groups(struct rdma_id_private *id_priv) { struct cma_multicast *mc; while (!list_empty(&id_priv->mc_list)) { mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, list); list_del(&mc->list); destroy_mc(id_priv, mc); } } static void _destroy_id(struct rdma_id_private *id_priv, enum rdma_cm_state state) { cma_cancel_operation(id_priv, state); rdma_restrack_del(&id_priv->res); cma_remove_id_from_tree(id_priv); if (id_priv->cma_dev) { if (rdma_cap_ib_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); } 
else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.iw) iw_destroy_cm_id(id_priv->cm_id.iw); } cma_leave_mc_groups(id_priv); cma_release_dev(id_priv); } cma_release_port(id_priv); cma_id_put(id_priv); wait_for_completion(&id_priv->comp); if (id_priv->internal_id) cma_id_put(id_priv->id.context); kfree(id_priv->id.route.path_rec); kfree(id_priv->id.route.path_rec_inbound); kfree(id_priv->id.route.path_rec_outbound); put_net(id_priv->id.route.addr.dev_addr.net); kfree(id_priv); } /* * destroy an ID from within the handler_mutex. This ensures that no other * handlers can start running concurrently. */ static void destroy_id_handler_unlock(struct rdma_id_private *id_priv) __releases(&id_priv->handler_mutex) { enum rdma_cm_state state; unsigned long flags; trace_cm_id_destroy(id_priv); /* * Setting the state to destroyed under the handler mutex provides a * fence against calling handler callbacks. If this is invoked due to * the failure of a handler callback then it guarantees that no future * handlers will be called. */ lockdep_assert_held(&id_priv->handler_mutex); spin_lock_irqsave(&id_priv->lock, flags); state = id_priv->state; id_priv->state = RDMA_CM_DESTROYING; spin_unlock_irqrestore(&id_priv->lock, flags); mutex_unlock(&id_priv->handler_mutex); _destroy_id(id_priv, state); } void rdma_destroy_id(struct rdma_cm_id *id) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->handler_mutex); destroy_id_handler_unlock(id_priv); } EXPORT_SYMBOL(rdma_destroy_id); static int cma_rep_recv(struct rdma_id_private *id_priv) { int ret; ret = cma_modify_qp_rtr(id_priv, NULL); if (ret) goto reject; ret = cma_modify_qp_rts(id_priv, NULL); if (ret) goto reject; trace_cm_send_rtu(id_priv); ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); if (ret) goto reject; return 0; reject: pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. 
status %d\n", ret); cma_modify_qp_err(id_priv); trace_cm_send_rej(id_priv); ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); return ret; } static void cma_set_rep_event_data(struct rdma_cm_event *event, const struct ib_cm_rep_event_param *rep_data, void *private_data) { event->param.conn.private_data = private_data; event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; event->param.conn.responder_resources = rep_data->responder_resources; event->param.conn.initiator_depth = rep_data->initiator_depth; event->param.conn.flow_control = rep_data->flow_control; event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; event->param.conn.srq = rep_data->srq; event->param.conn.qp_num = rep_data->remote_qpn; event->ece.vendor_id = rep_data->ece.vendor_id; event->ece.attr_mod = rep_data->ece.attr_mod; } static int cma_cm_event_handler(struct rdma_id_private *id_priv, struct rdma_cm_event *event) { int ret; lockdep_assert_held(&id_priv->handler_mutex); trace_cm_event_handler(id_priv, event); ret = id_priv->id.event_handler(&id_priv->id, event); trace_cm_event_done(id_priv, event, ret); return ret; } static int cma_ib_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event) { struct rdma_id_private *id_priv = cm_id->context; struct rdma_cm_event event = {}; enum rdma_cm_state state; int ret; mutex_lock(&id_priv->handler_mutex); state = READ_ONCE(id_priv->state); if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && state != RDMA_CM_CONNECT) || (ib_event->event == IB_CM_TIMEWAIT_EXIT && state != RDMA_CM_DISCONNECT)) goto out; switch (ib_event->event) { case IB_CM_REQ_ERROR: case IB_CM_REP_ERROR: event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = -ETIMEDOUT; break; case IB_CM_REP_RECEIVED: if (state == RDMA_CM_CONNECT && (id_priv->id.qp_type != IB_QPT_UD)) { trace_cm_send_mra(id_priv); ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); } if (id_priv->id.qp) { event.status = cma_rep_recv(id_priv); event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : RDMA_CM_EVENT_ESTABLISHED; } else { event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; } cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, ib_event->private_data); break; case IB_CM_RTU_RECEIVED: case IB_CM_USER_ESTABLISHED: event.event = RDMA_CM_EVENT_ESTABLISHED; break; case IB_CM_DREQ_ERROR: event.status = -ETIMEDOUT; fallthrough; case IB_CM_DREQ_RECEIVED: case IB_CM_DREP_RECEIVED: if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_DISCONNECT)) goto out; event.event = RDMA_CM_EVENT_DISCONNECTED; break; case IB_CM_TIMEWAIT_EXIT: event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; break; case IB_CM_MRA_RECEIVED: /* ignore event */ goto out; case IB_CM_REJ_RECEIVED: pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, ib_event->param.rej_rcvd.reason)); cma_modify_qp_err(id_priv); event.status = ib_event->param.rej_rcvd.reason; event.event = RDMA_CM_EVENT_REJECTED; event.param.conn.private_data = ib_event->private_data; event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; break; default: pr_err("RDMA CMA: unexpected IB CM event: %d\n", ib_event->event); goto out; } ret = cma_cm_event_handler(id_priv, &event); if (ret) { /* Destroy the CM ID by returning a non-zero value. 
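 * The ib_cm core frees cm_id itself once this callback returns non-zero,
 * so the cached pointer is cleared first to keep _destroy_id() from
 * destroying it a second time.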
*/ id_priv->cm_id.ib = NULL; destroy_id_handler_unlock(id_priv); return ret; } out: mutex_unlock(&id_priv->handler_mutex); return 0; } static struct rdma_id_private * cma_ib_new_conn_id(const struct rdma_cm_id *listen_id, const struct ib_cm_event *ib_event, struct net_device *net_dev) { struct rdma_id_private *listen_id_priv; struct rdma_id_private *id_priv; struct rdma_cm_id *id; struct rdma_route *rt; const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; const __be64 service_id = ib_event->param.req_rcvd.primary_path->service_id; int ret; listen_id_priv = container_of(listen_id, struct rdma_id_private, id); id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, listen_id->event_handler, listen_id->context, listen_id->ps, ib_event->param.req_rcvd.qp_type, listen_id_priv); if (IS_ERR(id_priv)) return NULL; id = &id_priv->id; if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, (struct sockaddr *)&id->route.addr.dst_addr, listen_id, ib_event, ss_family, service_id)) goto err; rt = &id->route; rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; rt->path_rec = kmalloc_array(rt->num_pri_alt_paths, sizeof(*rt->path_rec), GFP_KERNEL); if (!rt->path_rec) goto err; rt->path_rec[0] = *path; if (rt->num_pri_alt_paths == 2) rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; if (net_dev) { rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev); } else { if (!cma_protocol_roce(listen_id) && cma_any_addr(cma_src_addr(id_priv))) { rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); } else if (!cma_any_addr(cma_src_addr(id_priv))) { ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); if (ret) goto err; } } rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); id_priv->state = RDMA_CM_CONNECT; return id_priv; err: rdma_destroy_id(id); return NULL; } static struct rdma_id_private * cma_ib_new_udp_id(const struct rdma_cm_id *listen_id, const struct ib_cm_event *ib_event, struct net_device *net_dev) { const struct rdma_id_private *listen_id_priv; struct rdma_id_private *id_priv; struct rdma_cm_id *id; const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; struct net *net = listen_id->route.addr.dev_addr.net; int ret; listen_id_priv = container_of(listen_id, struct rdma_id_private, id); id_priv = __rdma_create_id(net, listen_id->event_handler, listen_id->context, listen_id->ps, IB_QPT_UD, listen_id_priv); if (IS_ERR(id_priv)) return NULL; id = &id_priv->id; if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, (struct sockaddr *)&id->route.addr.dst_addr, listen_id, ib_event, ss_family, ib_event->param.sidr_req_rcvd.service_id)) goto err; if (net_dev) { rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); } else { if (!cma_any_addr(cma_src_addr(id_priv))) { ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr); if (ret) goto err; } } id_priv->state = RDMA_CM_CONNECT; return id_priv; err: rdma_destroy_id(id); return NULL; } static void cma_set_req_event_data(struct rdma_cm_event *event, const struct ib_cm_req_event_param *req_data, void *private_data, int offset) { event->param.conn.private_data = private_data + offset; event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; event->param.conn.responder_resources = req_data->responder_resources; 
event->param.conn.initiator_depth = req_data->initiator_depth; event->param.conn.flow_control = req_data->flow_control; event->param.conn.retry_count = req_data->retry_count; event->param.conn.rnr_retry_count = req_data->rnr_retry_count; event->param.conn.srq = req_data->srq; event->param.conn.qp_num = req_data->remote_qpn; event->ece.vendor_id = req_data->ece.vendor_id; event->ece.attr_mod = req_data->ece.attr_mod; } static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id, const struct ib_cm_event *ib_event) { return (((ib_event->event == IB_CM_REQ_RECEIVED) && (ib_event->param.req_rcvd.qp_type == id->qp_type)) || ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && (id->qp_type == IB_QPT_UD)) || (!id->qp_type)); } static int cma_ib_req_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event) { struct rdma_id_private *listen_id, *conn_id = NULL; struct rdma_cm_event event = {}; struct cma_req_info req = {}; struct net_device *net_dev; u8 offset; int ret; listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev); if (IS_ERR(listen_id)) return PTR_ERR(listen_id); trace_cm_req_handler(listen_id, ib_event->event); if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { ret = -EINVAL; goto net_dev_put; } mutex_lock(&listen_id->handler_mutex); if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { ret = -ECONNABORTED; goto err_unlock; } offset = cma_user_data_offset(listen_id); event.event = RDMA_CM_EVENT_CONNECT_REQUEST; if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev); event.param.ud.private_data = ib_event->private_data + offset; event.param.ud.private_data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; } else { conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev); cma_set_req_event_data(&event, &ib_event->param.req_rcvd, ib_event->private_data, offset); } if (!conn_id) { ret = -ENOMEM; goto err_unlock; } mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); ret = cma_ib_acquire_dev(conn_id, listen_id, &req); if (ret) { destroy_id_handler_unlock(conn_id); goto err_unlock; } conn_id->cm_id.ib = cm_id; cm_id->context = conn_id; cm_id->cm_handler = cma_ib_handler; ret = cma_cm_event_handler(conn_id, &event); if (ret) { /* Destroy the CM ID by returning a non-zero value. 
*/ conn_id->cm_id.ib = NULL; mutex_unlock(&listen_id->handler_mutex); destroy_id_handler_unlock(conn_id); goto net_dev_put; } if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && conn_id->id.qp_type != IB_QPT_UD) { trace_cm_send_mra(cm_id->context); ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); } mutex_unlock(&conn_id->handler_mutex); err_unlock: mutex_unlock(&listen_id->handler_mutex); net_dev_put: dev_put(net_dev); return ret; } __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) { if (addr->sa_family == AF_IB) return ((struct sockaddr_ib *) addr)->sib_sid; return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); } EXPORT_SYMBOL(rdma_get_service_id); void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, union ib_gid *dgid) { struct rdma_addr *addr = &cm_id->route.addr; if (!cm_id->device) { if (sgid) memset(sgid, 0, sizeof(*sgid)); if (dgid) memset(dgid, 0, sizeof(*dgid)); return; } if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) { if (sgid) rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid); if (dgid) rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid); } else { if (sgid) rdma_addr_get_sgid(&addr->dev_addr, sgid); if (dgid) rdma_addr_get_dgid(&addr->dev_addr, dgid); } } EXPORT_SYMBOL(rdma_read_gids); static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) { struct rdma_id_private *id_priv = iw_id->context; struct rdma_cm_event event = {}; int ret = 0; struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) goto out; switch (iw_event->event) { case IW_CM_EVENT_CLOSE: event.event = RDMA_CM_EVENT_DISCONNECTED; break; case IW_CM_EVENT_CONNECT_REPLY: memcpy(cma_src_addr(id_priv), laddr, rdma_addr_size(laddr)); memcpy(cma_dst_addr(id_priv), raddr, rdma_addr_size(raddr)); switch (iw_event->status) { case 0: event.event = RDMA_CM_EVENT_ESTABLISHED; event.param.conn.initiator_depth = iw_event->ird; event.param.conn.responder_resources = iw_event->ord; break; case -ECONNRESET: case -ECONNREFUSED: event.event = RDMA_CM_EVENT_REJECTED; break; case -ETIMEDOUT: event.event = RDMA_CM_EVENT_UNREACHABLE; break; default: event.event = RDMA_CM_EVENT_CONNECT_ERROR; break; } break; case IW_CM_EVENT_ESTABLISHED: event.event = RDMA_CM_EVENT_ESTABLISHED; event.param.conn.initiator_depth = iw_event->ird; event.param.conn.responder_resources = iw_event->ord; break; default: goto out; } event.status = iw_event->status; event.param.conn.private_data = iw_event->private_data; event.param.conn.private_data_len = iw_event->private_data_len; ret = cma_cm_event_handler(id_priv, &event); if (ret) { /* Destroy the CM ID by returning a non-zero value. 
*/ id_priv->cm_id.iw = NULL; destroy_id_handler_unlock(id_priv); return ret; } out: mutex_unlock(&id_priv->handler_mutex); return ret; } static int iw_conn_req_handler(struct iw_cm_id *cm_id, struct iw_cm_event *iw_event) { struct rdma_id_private *listen_id, *conn_id; struct rdma_cm_event event = {}; int ret = -ECONNABORTED; struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; event.event = RDMA_CM_EVENT_CONNECT_REQUEST; event.param.conn.private_data = iw_event->private_data; event.param.conn.private_data_len = iw_event->private_data_len; event.param.conn.initiator_depth = iw_event->ird; event.param.conn.responder_resources = iw_event->ord; listen_id = cm_id->context; mutex_lock(&listen_id->handler_mutex); if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) goto out; /* Create a new RDMA id for the new IW CM ID */ conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, listen_id->id.event_handler, listen_id->id.context, RDMA_PS_TCP, IB_QPT_RC, listen_id); if (IS_ERR(conn_id)) { ret = -ENOMEM; goto out; } mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); conn_id->state = RDMA_CM_CONNECT; ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); if (ret) { mutex_unlock(&listen_id->handler_mutex); destroy_id_handler_unlock(conn_id); return ret; } ret = cma_iw_acquire_dev(conn_id, listen_id); if (ret) { mutex_unlock(&listen_id->handler_mutex); destroy_id_handler_unlock(conn_id); return ret; } conn_id->cm_id.iw = cm_id; cm_id->context = conn_id; cm_id->cm_handler = cma_iw_handler; memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr)); memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr)); ret = cma_cm_event_handler(conn_id, &event); if (ret) { /* User wants to destroy the CM ID */ conn_id->cm_id.iw = NULL; mutex_unlock(&listen_id->handler_mutex); destroy_id_handler_unlock(conn_id); return ret; } mutex_unlock(&conn_id->handler_mutex); out: mutex_unlock(&listen_id->handler_mutex); return ret; } static int cma_ib_listen(struct rdma_id_private *id_priv) { struct sockaddr *addr; struct ib_cm_id *id; __be64 svc_id; addr = cma_src_addr(id_priv); svc_id = rdma_get_service_id(&id_priv->id, addr); id = ib_cm_insert_listen(id_priv->id.device, cma_ib_req_handler, svc_id); if (IS_ERR(id)) return PTR_ERR(id); id_priv->cm_id.ib = id; return 0; } static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) { int ret; struct iw_cm_id *id; id = iw_create_cm_id(id_priv->id.device, iw_conn_req_handler, id_priv); if (IS_ERR(id)) return PTR_ERR(id); mutex_lock(&id_priv->qp_mutex); id->tos = id_priv->tos; id->tos_set = id_priv->tos_set; mutex_unlock(&id_priv->qp_mutex); id->afonly = id_priv->afonly; id_priv->cm_id.iw = id; memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), rdma_addr_size(cma_src_addr(id_priv))); ret = iw_cm_listen(id_priv->cm_id.iw, backlog); if (ret) { iw_destroy_cm_id(id_priv->cm_id.iw); id_priv->cm_id.iw = NULL; } return ret; } static int cma_listen_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) { struct rdma_id_private *id_priv = id->context; /* Listening IDs are always destroyed on removal */ if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) return -1; id->context = id_priv->id.context; id->event_handler = id_priv->id.event_handler; trace_cm_event_handler(id_priv, event); return id_priv->id.event_handler(id, event); } static int cma_listen_on_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev, struct rdma_id_private 
**to_destroy) { struct rdma_id_private *dev_id_priv; struct net *net = id_priv->id.route.addr.dev_addr.net; int ret; lockdep_assert_held(&lock); *to_destroy = NULL; if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return 0; dev_id_priv = __rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, id_priv->id.qp_type, id_priv); if (IS_ERR(dev_id_priv)) return PTR_ERR(dev_id_priv); dev_id_priv->state = RDMA_CM_ADDR_BOUND; memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), rdma_addr_size(cma_src_addr(id_priv))); _cma_attach_to_dev(dev_id_priv, cma_dev); rdma_restrack_add(&dev_id_priv->res); cma_id_get(id_priv); dev_id_priv->internal_id = 1; dev_id_priv->afonly = id_priv->afonly; mutex_lock(&id_priv->qp_mutex); dev_id_priv->tos_set = id_priv->tos_set; dev_id_priv->tos = id_priv->tos; mutex_unlock(&id_priv->qp_mutex); ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); if (ret) goto err_listen; list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list); return 0; err_listen: /* Caller must destroy this after releasing lock */ *to_destroy = dev_id_priv; dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); return ret; } static int cma_listen_on_all(struct rdma_id_private *id_priv) { struct rdma_id_private *to_destroy; struct cma_device *cma_dev; int ret; mutex_lock(&lock); list_add_tail(&id_priv->listen_any_item, &listen_any_list); list_for_each_entry(cma_dev, &dev_list, list) { ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); if (ret) { /* Prevent racing with cma_process_remove() */ if (to_destroy) list_del_init(&to_destroy->device_item); goto err_listen; } } mutex_unlock(&lock); return 0; err_listen: _cma_cancel_listens(id_priv); mutex_unlock(&lock); if (to_destroy) rdma_destroy_id(&to_destroy->id); return ret; } void rdma_set_service_type(struct rdma_cm_id *id, int tos) { struct rdma_id_private *id_priv; id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->qp_mutex); id_priv->tos = (u8) tos; id_priv->tos_set = true; mutex_unlock(&id_priv->qp_mutex); } EXPORT_SYMBOL(rdma_set_service_type); /** * rdma_set_ack_timeout() - Set the ack timeout of QP associated * with a connection identifier. * @id: Communication identifier associated with the service type. * @timeout: Ack timeout to set on a QP, expressed as 4.096 * 2^(timeout) usec. * * This function should be called before rdma_connect() on active side, * and on passive side before rdma_accept(). It is applicable to primary * path only. The timeout will affect the local side of the QP, it is not * negotiated with remote side and zero disables the timer. In case it is * set before rdma_resolve_route, the value will also be used to determine * PacketLifeTime for RoCE. * * Return: 0 for success */ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) { struct rdma_id_private *id_priv; if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->qp_mutex); id_priv->timeout = timeout; id_priv->timeout_set = true; mutex_unlock(&id_priv->qp_mutex); return 0; } EXPORT_SYMBOL(rdma_set_ack_timeout); /** * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the * QP associated with a connection identifier. * @id: Communication identifier associated with the service type. * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK * Timer Field" in the IBTA specification. 
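 *                 (An encoding of 0 selects the largest value the table
 *                 defines, roughly 655 ms, which is also the default noted
 *                 below; non-zero encodings select shorter waits, down to
 *                 tens of microseconds at the low end.)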
* * This function should be called before rdma_connect() on active * side, and on passive side before rdma_accept(). The timer value * will be associated with the local QP. When it receives a send it is * not ready to handle, typically if the receive queue is empty, an RNR * Retry NAK is returned to the requester with the min_rnr_timer * encoded. The requester will then wait at least the time specified * in the NAK before retrying. The default is zero, which translates * to a minimum RNR Timer value of 655 ms. * * Return: 0 for success */ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer) { struct rdma_id_private *id_priv; /* It is a five-bit value */ if (min_rnr_timer & 0xe0) return -EINVAL; if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->qp_mutex); id_priv->min_rnr_timer = min_rnr_timer; id_priv->min_rnr_timer_set = true; mutex_unlock(&id_priv->qp_mutex); return 0; } EXPORT_SYMBOL(rdma_set_min_rnr_timer); static int route_set_path_rec_inbound(struct cma_work *work, struct sa_path_rec *path_rec) { struct rdma_route *route = &work->id->id.route; if (!route->path_rec_inbound) { route->path_rec_inbound = kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL); if (!route->path_rec_inbound) return -ENOMEM; } *route->path_rec_inbound = *path_rec; return 0; } static int route_set_path_rec_outbound(struct cma_work *work, struct sa_path_rec *path_rec) { struct rdma_route *route = &work->id->id.route; if (!route->path_rec_outbound) { route->path_rec_outbound = kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL); if (!route->path_rec_outbound) return -ENOMEM; } *route->path_rec_outbound = *path_rec; return 0; } static void cma_query_handler(int status, struct sa_path_rec *path_rec, unsigned int num_prs, void *context) { struct cma_work *work = context; struct rdma_route *route; int i; route = &work->id->id.route; if (status) goto fail; for (i = 0; i < num_prs; i++) { if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP)) *route->path_rec = path_rec[i]; else if (path_rec[i].flags & IB_PATH_INBOUND) status = route_set_path_rec_inbound(work, &path_rec[i]); else if (path_rec[i].flags & IB_PATH_OUTBOUND) status = route_set_path_rec_outbound(work, &path_rec[i]); else status = -EINVAL; if (status) goto fail; } route->num_pri_alt_paths = 1; queue_work(cma_wq, &work->work); return; fail: work->old_state = RDMA_CM_ROUTE_QUERY; work->new_state = RDMA_CM_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; work->event.status = status; pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. 
status %d\n", status); queue_work(cma_wq, &work->work); } static int cma_query_ib_route(struct rdma_id_private *id_priv, unsigned long timeout_ms, struct cma_work *work) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct sa_path_rec path_rec; ib_sa_comp_mask comp_mask; struct sockaddr_in6 *sin6; struct sockaddr_ib *sib; memset(&path_rec, 0, sizeof path_rec); if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num)) path_rec.rec_type = SA_PATH_REC_TYPE_OPA; else path_rec.rec_type = SA_PATH_REC_TYPE_IB; rdma_addr_get_sgid(dev_addr, &path_rec.sgid); rdma_addr_get_dgid(dev_addr, &path_rec.dgid); path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); path_rec.numb_path = 1; path_rec.reversible = 1; path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID; switch (cma_family(id_priv)) { case AF_INET: path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); comp_mask |= IB_SA_PATH_REC_QOS_CLASS; break; case AF_INET6: sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; break; case AF_IB: sib = (struct sockaddr_ib *) cma_src_addr(id_priv); path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS; break; } id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, id_priv->id.port_num, &path_rec, comp_mask, timeout_ms, GFP_KERNEL, cma_query_handler, work, &id_priv->query); return (id_priv->query_id < 0) ? id_priv->query_id : 0; } static void cma_iboe_join_work_handler(struct work_struct *work) { struct cma_multicast *mc = container_of(work, struct cma_multicast, iboe_join.work); struct rdma_cm_event *event = &mc->iboe_join.event; struct rdma_id_private *id_priv = mc->id_priv; int ret; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) goto out_unlock; ret = cma_cm_event_handler(id_priv, event); WARN_ON(ret); out_unlock: mutex_unlock(&id_priv->handler_mutex); if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) rdma_destroy_ah_attr(&event->param.ud.ah_attr); } static void cma_work_handler(struct work_struct *_work) { struct cma_work *work = container_of(_work, struct cma_work, work); struct rdma_id_private *id_priv = work->id; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) goto out_unlock; if (work->old_state != 0 || work->new_state != 0) { if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) goto out_unlock; } if (cma_cm_event_handler(id_priv, &work->event)) { cma_id_put(id_priv); destroy_id_handler_unlock(id_priv); goto out_free; } out_unlock: mutex_unlock(&id_priv->handler_mutex); cma_id_put(id_priv); out_free: if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) rdma_destroy_ah_attr(&work->event.param.ud.ah_attr); kfree(work); } static void cma_init_resolve_route_work(struct cma_work *work, struct rdma_id_private *id_priv) { work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); work->old_state = RDMA_CM_ROUTE_QUERY; work->new_state = RDMA_CM_ROUTE_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; } static void enqueue_resolve_addr_work(struct cma_work *work, struct rdma_id_private *id_priv) { 
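	/*
	 * Queue an ADDR_QUERY -> ADDR_RESOLVED transition on cma_wq; the
	 * reference taken here is dropped by cma_work_handler() once the
	 * RDMA_CM_EVENT_ADDR_RESOLVED event has been delivered.
	 */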
/* Balances with cma_id_put() in cma_work_handler */ cma_id_get(id_priv); work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); work->old_state = RDMA_CM_ADDR_QUERY; work->new_state = RDMA_CM_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; queue_work(cma_wq, &work->work); } static int cma_resolve_ib_route(struct rdma_id_private *id_priv, unsigned long timeout_ms) { struct rdma_route *route = &id_priv->id.route; struct cma_work *work; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; cma_init_resolve_route_work(work, id_priv); if (!route->path_rec) route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); if (!route->path_rec) { ret = -ENOMEM; goto err1; } ret = cma_query_ib_route(id_priv, timeout_ms, work); if (ret) goto err2; return 0; err2: kfree(route->path_rec); route->path_rec = NULL; err1: kfree(work); return ret; } static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, unsigned long supported_gids, enum ib_gid_type default_gid) { if ((network_type == RDMA_NETWORK_IPV4 || network_type == RDMA_NETWORK_IPV6) && test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) return IB_GID_TYPE_ROCE_UDP_ENCAP; return default_gid; } /* * cma_iboe_set_path_rec_l2_fields() is helper function which sets * path record type based on GID type. * It also sets up other L2 fields which includes destination mac address * netdev ifindex, of the path record. * It returns the netdev of the bound interface for this path record entry. */ static struct net_device * cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv) { struct rdma_route *route = &id_priv->id.route; enum ib_gid_type gid_type = IB_GID_TYPE_ROCE; struct rdma_addr *addr = &route->addr; unsigned long supported_gids; struct net_device *ndev; if (!addr->dev_addr.bound_dev_if) return NULL; ndev = dev_get_by_index(addr->dev_addr.net, addr->dev_addr.bound_dev_if); if (!ndev) return NULL; supported_gids = roce_gid_type_mask_support(id_priv->id.device, id_priv->id.port_num); gid_type = cma_route_gid_type(addr->dev_addr.network, supported_gids, id_priv->gid_type); /* Use the hint from IP Stack to select GID Type */ if (gid_type < ib_network_to_gid_type(addr->dev_addr.network)) gid_type = ib_network_to_gid_type(addr->dev_addr.network); route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type); route->path_rec->roce.route_resolved = true; sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr); return ndev; } int rdma_set_ib_path(struct rdma_cm_id *id, struct sa_path_rec *path_rec) { struct rdma_id_private *id_priv; struct net_device *ndev; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_RESOLVED)) return -EINVAL; id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec), GFP_KERNEL); if (!id->route.path_rec) { ret = -ENOMEM; goto err; } if (rdma_protocol_roce(id->device, id->port_num)) { ndev = cma_iboe_set_path_rec_l2_fields(id_priv); if (!ndev) { ret = -ENODEV; goto err_free; } dev_put(ndev); } id->route.num_pri_alt_paths = 1; return 0; err_free: kfree(id->route.path_rec); id->route.path_rec = NULL; err: cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); return ret; } EXPORT_SYMBOL(rdma_set_ib_path); static int cma_resolve_iw_route(struct rdma_id_private *id_priv) { struct cma_work *work; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; cma_init_resolve_route_work(work, id_priv); queue_work(cma_wq, &work->work); return 0; } static 
int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio) { struct net_device *dev; dev = vlan_dev_real_dev(vlan_ndev); if (dev->num_tc) return netdev_get_prio_tc_map(dev, prio); return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; } struct iboe_prio_tc_map { int input_prio; int output_tc; bool found; }; static int get_lower_vlan_dev_tc(struct net_device *dev, struct netdev_nested_priv *priv) { struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data; if (is_vlan_dev(dev)) map->output_tc = get_vlan_ndev_tc(dev, map->input_prio); else if (dev->num_tc) map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio); else map->output_tc = 0; /* We are interested only in first level VLAN device, so always * return 1 to stop iterating over next level devices. */ map->found = true; return 1; } static int iboe_tos_to_sl(struct net_device *ndev, int tos) { struct iboe_prio_tc_map prio_tc_map = {}; int prio = rt_tos2priority(tos); struct netdev_nested_priv priv; /* If VLAN device, get it directly from the VLAN netdev */ if (is_vlan_dev(ndev)) return get_vlan_ndev_tc(ndev, prio); prio_tc_map.input_prio = prio; priv.data = (void *)&prio_tc_map; rcu_read_lock(); netdev_walk_all_lower_dev_rcu(ndev, get_lower_vlan_dev_tc, &priv); rcu_read_unlock(); /* If map is found from lower device, use it; Otherwise * continue with the current netdevice to get priority to tc map. */ if (prio_tc_map.found) return prio_tc_map.output_tc; else if (ndev->num_tc) return netdev_get_prio_tc_map(ndev, prio); else return 0; } static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv) { struct sockaddr_in6 *addr6; u16 dport, sport; u32 hash, fl; addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv); fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK; if ((cma_family(id_priv) != AF_INET6) || !fl) { dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv))); sport = be16_to_cpu(cma_port(cma_src_addr(id_priv))); hash = (u32)sport * 31 + dport; fl = hash & IB_GRH_FLOWLABEL_MASK; } return cpu_to_be32(fl); } static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) { struct rdma_route *route = &id_priv->id.route; struct rdma_addr *addr = &route->addr; struct cma_work *work; int ret; struct net_device *ndev; u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - rdma_start_port(id_priv->cma_dev->device)]; u8 tos; mutex_lock(&id_priv->qp_mutex); tos = id_priv->tos_set ? 
id_priv->tos : default_roce_tos; mutex_unlock(&id_priv->qp_mutex); work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); if (!route->path_rec) { ret = -ENOMEM; goto err1; } route->num_pri_alt_paths = 1; ndev = cma_iboe_set_path_rec_l2_fields(id_priv); if (!ndev) { ret = -ENODEV; goto err2; } rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &route->path_rec->sgid); rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, &route->path_rec->dgid); if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) /* TODO: get the hoplimit from the inet/inet6 device */ route->path_rec->hop_limit = addr->dev_addr.hoplimit; else route->path_rec->hop_limit = 1; route->path_rec->reversible = 1; route->path_rec->pkey = cpu_to_be16(0xffff); route->path_rec->mtu_selector = IB_SA_EQ; route->path_rec->sl = iboe_tos_to_sl(ndev, tos); route->path_rec->traffic_class = tos; route->path_rec->mtu = iboe_get_mtu(ndev->mtu); route->path_rec->rate_selector = IB_SA_EQ; route->path_rec->rate = IB_RATE_PORT_CURRENT; dev_put(ndev); route->path_rec->packet_life_time_selector = IB_SA_EQ; /* In case ACK timeout is set, use this value to calculate * PacketLifeTime. As per IBTA 12.7.34, * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay). * Assuming a negligible local ACK delay, we can use * PacketLifeTime = local ACK timeout/2 * as a reasonable approximation for RoCE networks. */ mutex_lock(&id_priv->qp_mutex); if (id_priv->timeout_set && id_priv->timeout) route->path_rec->packet_life_time = id_priv->timeout - 1; else route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; mutex_unlock(&id_priv->qp_mutex); if (!route->path_rec->mtu) { ret = -EINVAL; goto err2; } if (rdma_protocol_roce_udp_encap(id_priv->id.device, id_priv->id.port_num)) route->path_rec->flow_label = cma_get_roce_udp_flow_label(id_priv); cma_init_resolve_route_work(work, id_priv); queue_work(cma_wq, &work->work); return 0; err2: kfree(route->path_rec); route->path_rec = NULL; route->num_pri_alt_paths = 0; err1: kfree(work); return ret; } int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) { struct rdma_id_private *id_priv; int ret; if (!timeout_ms) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) return -EINVAL; cma_id_get(id_priv); if (rdma_cap_ib_sa(id->device, id->port_num)) ret = cma_resolve_ib_route(id_priv, timeout_ms); else if (rdma_protocol_roce(id->device, id->port_num)) { ret = cma_resolve_iboe_route(id_priv); if (!ret) cma_add_id_to_tree(id_priv); } else if (rdma_protocol_iwarp(id->device, id->port_num)) ret = cma_resolve_iw_route(id_priv); else ret = -ENOSYS; if (ret) goto err; return 0; err: cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); cma_id_put(id_priv); return ret; } EXPORT_SYMBOL(rdma_resolve_route); static void cma_set_loopback(struct sockaddr *addr) { switch (addr->sa_family) { case AF_INET: ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); break; case AF_INET6: ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, 0, 0, 0, htonl(1)); break; default: ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, 0, 0, 0, htonl(1)); break; } } static int cma_bind_loopback(struct rdma_id_private *id_priv) { struct cma_device *cma_dev, *cur_dev; union ib_gid gid; enum ib_port_state port_state; unsigned int p; u16 pkey; int ret; cma_dev = NULL; 
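	/*
	 * Device selection for the loopback binding: remember the first
	 * usable device as a fallback (port 1), but prefer any device with
	 * an ACTIVE port; devices without IB CM support are skipped when
	 * the address family is AF_IB.
	 */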
mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cur_dev->device, 1)) continue; if (!cma_dev) cma_dev = cur_dev; rdma_for_each_port (cur_dev->device, p) { if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && port_state == IB_PORT_ACTIVE) { cma_dev = cur_dev; goto port_found; } } } if (!cma_dev) { ret = -ENODEV; goto out; } p = 1; port_found: ret = rdma_query_gid(cma_dev->device, p, 0, &gid); if (ret) goto out; ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); if (ret) goto out; id_priv->id.route.addr.dev_addr.dev_type = (rdma_protocol_ib(cma_dev->device, p)) ? ARPHRD_INFINIBAND : ARPHRD_ETHER; rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); id_priv->id.port_num = p; cma_attach_to_dev(id_priv, cma_dev); rdma_restrack_add(&id_priv->res); cma_set_loopback(cma_src_addr(id_priv)); out: mutex_unlock(&lock); return ret; } static void addr_handler(int status, struct sockaddr *src_addr, struct rdma_dev_addr *dev_addr, void *context) { struct rdma_id_private *id_priv = context; struct rdma_cm_event event = {}; struct sockaddr *addr; struct sockaddr_storage old_addr; mutex_lock(&id_priv->handler_mutex); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_RESOLVED)) goto out; /* * Store the previous src address, so that if we fail to acquire * matching rdma device, old address can be restored back, which helps * to cancel the cma listen operation correctly. */ addr = cma_src_addr(id_priv); memcpy(&old_addr, addr, rdma_addr_size(addr)); memcpy(addr, src_addr, rdma_addr_size(src_addr)); if (!status && !id_priv->cma_dev) { status = cma_acquire_dev_by_src_ip(id_priv); if (status) pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", status); rdma_restrack_add(&id_priv->res); } else if (status) { pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. 
status %d\n", status); } if (status) { memcpy(addr, &old_addr, rdma_addr_size((struct sockaddr *)&old_addr)); if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ADDR_BOUND)) goto out; event.event = RDMA_CM_EVENT_ADDR_ERROR; event.status = status; } else event.event = RDMA_CM_EVENT_ADDR_RESOLVED; if (cma_cm_event_handler(id_priv, &event)) { destroy_id_handler_unlock(id_priv); return; } out: mutex_unlock(&id_priv->handler_mutex); } static int cma_resolve_loopback(struct rdma_id_private *id_priv) { struct cma_work *work; union ib_gid gid; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; if (!id_priv->cma_dev) { ret = cma_bind_loopback(id_priv); if (ret) goto err; } rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); enqueue_resolve_addr_work(work, id_priv); return 0; err: kfree(work); return ret; } static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) { struct cma_work *work; int ret; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; if (!id_priv->cma_dev) { ret = cma_resolve_ib_dev(id_priv); if (ret) goto err; } rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); enqueue_resolve_addr_work(work, id_priv); return 0; err: kfree(work); return ret; } int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) { struct rdma_id_private *id_priv; unsigned long flags; int ret; id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irqsave(&id_priv->lock, flags); if ((reuse && id_priv->state != RDMA_CM_LISTEN) || id_priv->state == RDMA_CM_IDLE) { id_priv->reuseaddr = reuse; ret = 0; } else { ret = -EINVAL; } spin_unlock_irqrestore(&id_priv->lock, flags); return ret; } EXPORT_SYMBOL(rdma_set_reuseaddr); int rdma_set_afonly(struct rdma_cm_id *id, int afonly) { struct rdma_id_private *id_priv; unsigned long flags; int ret; id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irqsave(&id_priv->lock, flags); if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { id_priv->options |= (1 << CMA_OPTION_AFONLY); id_priv->afonly = afonly; ret = 0; } else { ret = -EINVAL; } spin_unlock_irqrestore(&id_priv->lock, flags); return ret; } EXPORT_SYMBOL(rdma_set_afonly); static void cma_bind_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) { struct sockaddr *addr; struct sockaddr_ib *sib; u64 sid, mask; __be16 port; lockdep_assert_held(&lock); addr = cma_src_addr(id_priv); port = htons(bind_list->port); switch (addr->sa_family) { case AF_INET: ((struct sockaddr_in *) addr)->sin_port = port; break; case AF_INET6: ((struct sockaddr_in6 *) addr)->sin6_port = port; break; case AF_IB: sib = (struct sockaddr_ib *) addr; sid = be64_to_cpu(sib->sib_sid); mask = be64_to_cpu(sib->sib_sid_mask); sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); sib->sib_sid_mask = cpu_to_be64(~0ULL); break; } id_priv->bind_list = bind_list; hlist_add_head(&id_priv->node, &bind_list->owners); } static int cma_alloc_port(enum rdma_ucm_port_space ps, struct rdma_id_private *id_priv, unsigned short snum) { struct rdma_bind_list *bind_list; int ret; lockdep_assert_held(&lock); bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, snum); if (ret < 0) goto err; bind_list->ps = ps; bind_list->port = snum; cma_bind_port(bind_list, id_priv); return 0; err: 
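	/*
	 * cma_ps_alloc() failed: release the unused bind list and report
	 * an exhausted port space (-ENOSPC) as -EADDRNOTAVAIL.
	 */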
kfree(bind_list); return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; } static int cma_port_is_unique(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) { struct rdma_id_private *cur_id; struct sockaddr *daddr = cma_dst_addr(id_priv); struct sockaddr *saddr = cma_src_addr(id_priv); __be16 dport = cma_port(daddr); lockdep_assert_held(&lock); hlist_for_each_entry(cur_id, &bind_list->owners, node) { struct sockaddr *cur_daddr = cma_dst_addr(cur_id); struct sockaddr *cur_saddr = cma_src_addr(cur_id); __be16 cur_dport = cma_port(cur_daddr); if (id_priv == cur_id) continue; /* different dest port -> unique */ if (!cma_any_port(daddr) && !cma_any_port(cur_daddr) && (dport != cur_dport)) continue; /* different src address -> unique */ if (!cma_any_addr(saddr) && !cma_any_addr(cur_saddr) && cma_addr_cmp(saddr, cur_saddr)) continue; /* different dst address -> unique */ if (!cma_any_addr(daddr) && !cma_any_addr(cur_daddr) && cma_addr_cmp(daddr, cur_daddr)) continue; return -EADDRNOTAVAIL; } return 0; } static int cma_alloc_any_port(enum rdma_ucm_port_space ps, struct rdma_id_private *id_priv) { static unsigned int last_used_port; int low, high, remaining; unsigned int rover; struct net *net = id_priv->id.route.addr.dev_addr.net; lockdep_assert_held(&lock); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = get_random_u32_inclusive(low, remaining + low - 1); retry: if (last_used_port != rover) { struct rdma_bind_list *bind_list; int ret; bind_list = cma_ps_find(net, ps, (unsigned short)rover); if (!bind_list) { ret = cma_alloc_port(ps, id_priv, rover); } else { ret = cma_port_is_unique(bind_list, id_priv); if (!ret) cma_bind_port(bind_list, id_priv); } /* * Remember previously used port number in order to avoid * re-using same port immediately after it is closed. */ if (!ret) last_used_port = rover; if (ret != -EADDRNOTAVAIL) return ret; } if (--remaining) { rover++; if ((rover < low) || (rover > high)) rover = low; goto retry; } return -EADDRNOTAVAIL; } /* * Check that the requested port is available. This is called when trying to * bind to a specific port, or when trying to listen on a bound port. In * the latter case, the provided id_priv may already be on the bind_list, but * we still need to check that it's okay to start listening. 
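 *
 * Note that two IDs which both requested reuseaddr may share the port as
 * long as neither is listening; rdma_listen() re-runs this check with
 * reuseaddr treated as off, so a listener's address must be unique in the
 * bind list.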
*/ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv, uint8_t reuseaddr) { struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; lockdep_assert_held(&lock); addr = cma_src_addr(id_priv); hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) continue; if (reuseaddr && cur_id->reuseaddr) continue; cur_addr = cma_src_addr(cur_id); if (id_priv->afonly && cur_id->afonly && (addr->sa_family != cur_addr->sa_family)) continue; if (cma_any_addr(addr) || cma_any_addr(cur_addr)) return -EADDRNOTAVAIL; if (!cma_addr_cmp(addr, cur_addr)) return -EADDRINUSE; } return 0; } static int cma_use_port(enum rdma_ucm_port_space ps, struct rdma_id_private *id_priv) { struct rdma_bind_list *bind_list; unsigned short snum; int ret; lockdep_assert_held(&lock); snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); if (!bind_list) { ret = cma_alloc_port(ps, id_priv, snum); } else { ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); if (!ret) cma_bind_port(bind_list, id_priv); } return ret; } static enum rdma_ucm_port_space cma_select_inet_ps(struct rdma_id_private *id_priv) { switch (id_priv->id.ps) { case RDMA_PS_TCP: case RDMA_PS_UDP: case RDMA_PS_IPOIB: case RDMA_PS_IB: return id_priv->id.ps; default: return 0; } } static enum rdma_ucm_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) { enum rdma_ucm_port_space ps = 0; struct sockaddr_ib *sib; u64 sid_ps, mask, sid; sib = (struct sockaddr_ib *) cma_src_addr(id_priv); mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; sid = be64_to_cpu(sib->sib_sid) & mask; if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { sid_ps = RDMA_IB_IP_PS_IB; ps = RDMA_PS_IB; } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && (sid == (RDMA_IB_IP_PS_TCP & mask))) { sid_ps = RDMA_IB_IP_PS_TCP; ps = RDMA_PS_TCP; } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && (sid == (RDMA_IB_IP_PS_UDP & mask))) { sid_ps = RDMA_IB_IP_PS_UDP; ps = RDMA_PS_UDP; } if (ps) { sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | be64_to_cpu(sib->sib_sid_mask)); } return ps; } static int cma_get_port(struct rdma_id_private *id_priv) { enum rdma_ucm_port_space ps; int ret; if (cma_family(id_priv) != AF_IB) ps = cma_select_inet_ps(id_priv); else ps = cma_select_ib_ps(id_priv); if (!ps) return -EPROTONOSUPPORT; mutex_lock(&lock); if (cma_any_port(cma_src_addr(id_priv))) ret = cma_alloc_any_port(ps, id_priv); else ret = cma_use_port(ps, id_priv); mutex_unlock(&lock); return ret; } static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, struct sockaddr *addr) { #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 *sin6; if (addr->sa_family != AF_INET6) return 0; sin6 = (struct sockaddr_in6 *) addr; if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) return 0; if (!sin6->sin6_scope_id) return -EINVAL; dev_addr->bound_dev_if = sin6->sin6_scope_id; #endif return 0; } int rdma_listen(struct rdma_cm_id *id, int backlog) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { struct sockaddr_in any_in = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), }; /* For a well behaved ULP state will 
be RDMA_CM_IDLE */ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in); if (ret) return ret; if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))) return -EINVAL; } /* * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable * any more, and has to be unique in the bind list. */ if (id_priv->reuseaddr) { mutex_lock(&lock); ret = cma_check_port(id_priv->bind_list, id_priv, 0); if (!ret) id_priv->reuseaddr = 0; mutex_unlock(&lock); if (ret) goto err; } id_priv->backlog = backlog; if (id_priv->cma_dev) { if (rdma_cap_ib_cm(id->device, 1)) { ret = cma_ib_listen(id_priv); if (ret) goto err; } else if (rdma_cap_iw_cm(id->device, 1)) { ret = cma_iw_listen(id_priv, backlog); if (ret) goto err; } else { ret = -ENOSYS; goto err; } } else { ret = cma_listen_on_all(id_priv); if (ret) goto err; } return 0; err: id_priv->backlog = 0; /* * All the failure paths that lead here will not allow the req_handler's * to have run. */ cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); return ret; } EXPORT_SYMBOL(rdma_listen); static int rdma_bind_addr_dst(struct rdma_id_private *id_priv, struct sockaddr *addr, const struct sockaddr *daddr) { struct sockaddr *id_daddr; int ret; if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && addr->sa_family != AF_IB) return -EAFNOSUPPORT; if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) return -EINVAL; ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr); if (ret) goto err1; memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); if (!cma_any_addr(addr)) { ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr); if (ret) goto err1; ret = cma_acquire_dev_by_src_ip(id_priv); if (ret) goto err1; } if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { if (addr->sa_family == AF_INET) id_priv->afonly = 1; #if IS_ENABLED(CONFIG_IPV6) else if (addr->sa_family == AF_INET6) { struct net *net = id_priv->id.route.addr.dev_addr.net; id_priv->afonly = net->ipv6.sysctl.bindv6only; } #endif } id_daddr = cma_dst_addr(id_priv); if (daddr != id_daddr) memcpy(id_daddr, daddr, rdma_addr_size(addr)); id_daddr->sa_family = addr->sa_family; ret = cma_get_port(id_priv); if (ret) goto err2; if (!cma_any_addr(addr)) rdma_restrack_add(&id_priv->res); return 0; err2: if (id_priv->cma_dev) cma_release_dev(id_priv); err1: cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); return ret; } static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); struct sockaddr_storage zero_sock = {}; if (src_addr && src_addr->sa_family) return rdma_bind_addr_dst(id_priv, src_addr, dst_addr); /* * When the src_addr is not specified, automatically supply an any addr */ zero_sock.ss_family = dst_addr->sa_family; if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)&zero_sock; struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst_addr; src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; } else if (dst_addr->sa_family == AF_IB) { ((struct sockaddr_ib *)&zero_sock)->sib_pkey = ((struct sockaddr_ib *)dst_addr)->sib_pkey; } return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr); } /* * If required, resolve the source address for bind and leave the id_priv in * state 
RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior * calls made by ULP, a previously bound ID will not be re-bound and src_addr is * ignored. */ static int resolve_prepare_src(struct rdma_id_private *id_priv, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { int ret; if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) { /* For a well behaved ULP state will be RDMA_CM_IDLE */ ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); if (ret) return ret; if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))) return -EINVAL; } else { memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); } if (cma_family(id_priv) != dst_addr->sa_family) { ret = -EINVAL; goto err_state; } return 0; err_state: cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); return ret; } int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr, unsigned long timeout_ms) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; ret = resolve_prepare_src(id_priv, src_addr, dst_addr); if (ret) return ret; if (cma_any_addr(dst_addr)) { ret = cma_resolve_loopback(id_priv); } else { if (dst_addr->sa_family == AF_IB) { ret = cma_resolve_ib_addr(id_priv); } else { /* * The FSM can return back to RDMA_CM_ADDR_BOUND after * rdma_resolve_ip() is called, eg through the error * path in addr_handler(). If this happens the existing * request must be canceled before issuing a new one. * Since canceling a request is a bit slow and this * oddball path is rare, keep track once a request has * been issued. The track turns out to be a permanent * state since this is the only cancel as it is * immediately before rdma_resolve_ip(). */ if (id_priv->used_resolve_ip) rdma_addr_cancel(&id->route.addr.dev_addr); else id_priv->used_resolve_ip = 1; ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, &id->route.addr.dev_addr, timeout_ms, addr_handler, false, id_priv); } } if (ret) goto err; return 0; err: cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); return ret; } EXPORT_SYMBOL(rdma_resolve_addr); int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv)); } EXPORT_SYMBOL(rdma_bind_addr); static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) { struct cma_hdr *cma_hdr; cma_hdr = hdr; cma_hdr->cma_version = CMA_VERSION; if (cma_family(id_priv) == AF_INET) { struct sockaddr_in *src4, *dst4; src4 = (struct sockaddr_in *) cma_src_addr(id_priv); dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); cma_set_ip_ver(cma_hdr, 4); cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; cma_hdr->port = src4->sin_port; } else if (cma_family(id_priv) == AF_INET6) { struct sockaddr_in6 *src6, *dst6; src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); cma_set_ip_ver(cma_hdr, 6); cma_hdr->src_addr.ip6 = src6->sin6_addr; cma_hdr->dst_addr.ip6 = dst6->sin6_addr; cma_hdr->port = src6->sin6_port; } return 0; } static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, const struct ib_cm_event *ib_event) { struct rdma_id_private *id_priv = cm_id->context; struct rdma_cm_event event = {}; const struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; int ret; mutex_lock(&id_priv->handler_mutex); if 
(READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) goto out; switch (ib_event->event) { case IB_CM_SIDR_REQ_ERROR: event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = -ETIMEDOUT; break; case IB_CM_SIDR_REP_RECEIVED: event.param.ud.private_data = ib_event->private_data; event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; if (rep->status != IB_SIDR_SUCCESS) { event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = ib_event->param.sidr_rep_rcvd.status; pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n", event.status); break; } ret = cma_set_qkey(id_priv, rep->qkey); if (ret) { pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret); event.event = RDMA_CM_EVENT_ADDR_ERROR; event.status = ret; break; } ib_init_ah_attr_from_path(id_priv->id.device, id_priv->id.port_num, id_priv->id.route.path_rec, &event.param.ud.ah_attr, rep->sgid_attr); event.param.ud.qp_num = rep->qpn; event.param.ud.qkey = rep->qkey; event.event = RDMA_CM_EVENT_ESTABLISHED; event.status = 0; break; default: pr_err("RDMA CMA: unexpected IB CM event: %d\n", ib_event->event); goto out; } ret = cma_cm_event_handler(id_priv, &event); rdma_destroy_ah_attr(&event.param.ud.ah_attr); if (ret) { /* Destroy the CM ID by returning a non-zero value. */ id_priv->cm_id.ib = NULL; destroy_id_handler_unlock(id_priv); return ret; } out: mutex_unlock(&id_priv->handler_mutex); return 0; } static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_cm_sidr_req_param req; struct ib_cm_id *id; void *private_data; u8 offset; int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { private_data = kzalloc(req.private_data_len, GFP_ATOMIC); if (!private_data) return -ENOMEM; } else { private_data = NULL; } if (conn_param->private_data && conn_param->private_data_len) memcpy(private_data + offset, conn_param->private_data, conn_param->private_data_len); if (private_data) { ret = cma_format_hdr(private_data, id_priv); if (ret) goto out; req.private_data = private_data; } id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, id_priv); if (IS_ERR(id)) { ret = PTR_ERR(id); goto out; } id_priv->cm_id.ib = id; req.path = id_priv->id.route.path_rec; req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); req.max_cm_retries = CMA_MAX_CM_RETRIES; trace_cm_send_sidr_req(id_priv); ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); if (ret) { ib_destroy_cm_id(id_priv->cm_id.ib); id_priv->cm_id.ib = NULL; } out: kfree(private_data); return ret; } static int cma_connect_ib(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_cm_req_param req; struct rdma_route *route; void *private_data; struct ib_cm_id *id; u8 offset; int ret; memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { private_data = kzalloc(req.private_data_len, GFP_ATOMIC); if (!private_data) return -ENOMEM; } else { private_data = NULL; } if (conn_param->private_data && conn_param->private_data_len) memcpy(private_data + offset, conn_param->private_data, conn_param->private_data_len); id = ib_create_cm_id(id_priv->id.device, 
cma_ib_handler, id_priv); if (IS_ERR(id)) { ret = PTR_ERR(id); goto out; } id_priv->cm_id.ib = id; route = &id_priv->id.route; if (private_data) { ret = cma_format_hdr(private_data, id_priv); if (ret) goto out; req.private_data = private_data; } req.primary_path = &route->path_rec[0]; req.primary_path_inbound = route->path_rec_inbound; req.primary_path_outbound = route->path_rec_outbound; if (route->num_pri_alt_paths == 2) req.alternate_path = &route->path_rec[1]; req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; /* Alternate path SGID attribute currently unsupported */ req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); req.qp_num = id_priv->qp_num; req.qp_type = id_priv->id.qp_type; req.starting_psn = id_priv->seq_num; req.responder_resources = conn_param->responder_resources; req.initiator_depth = conn_param->initiator_depth; req.flow_control = conn_param->flow_control; req.retry_count = min_t(u8, 7, conn_param->retry_count); req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT; req.max_cm_retries = CMA_MAX_CM_RETRIES; req.srq = id_priv->srq ? 1 : 0; req.ece.vendor_id = id_priv->ece.vendor_id; req.ece.attr_mod = id_priv->ece.attr_mod; trace_cm_send_req(id_priv); ret = ib_send_cm_req(id_priv->cm_id.ib, &req); out: if (ret && !IS_ERR(id)) { ib_destroy_cm_id(id); id_priv->cm_id.ib = NULL; } kfree(private_data); return ret; } static int cma_connect_iw(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct iw_cm_id *cm_id; int ret; struct iw_cm_conn_param iw_param; cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); mutex_lock(&id_priv->qp_mutex); cm_id->tos = id_priv->tos; cm_id->tos_set = id_priv->tos_set; mutex_unlock(&id_priv->qp_mutex); id_priv->cm_id.iw = cm_id; memcpy(&cm_id->local_addr, cma_src_addr(id_priv), rdma_addr_size(cma_src_addr(id_priv))); memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), rdma_addr_size(cma_dst_addr(id_priv))); ret = cma_modify_qp_rtr(id_priv, conn_param); if (ret) goto out; if (conn_param) { iw_param.ord = conn_param->initiator_depth; iw_param.ird = conn_param->responder_resources; iw_param.private_data = conn_param->private_data; iw_param.private_data_len = conn_param->private_data_len; iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; } else { memset(&iw_param, 0, sizeof iw_param); iw_param.qpn = id_priv->qp_num; } ret = iw_cm_connect(cm_id, &iw_param); out: if (ret) { iw_destroy_cm_id(cm_id); id_priv->cm_id.iw = NULL; } return ret; } /** * rdma_connect_locked - Initiate an active connection request. * @id: Connection identifier to connect. * @conn_param: Connection information used for connected QPs. * * Same as rdma_connect() but can only be called from the * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback. 
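 *
 * Illustrative sketch (not part of cma.c), with a hypothetical ULP
 * handler and connection parameters:
 *
 *	static int ulp_cm_handler(struct rdma_cm_id *id,
 *				  struct rdma_cm_event *ev)
 *	{
 *		struct rdma_conn_param param = { .retry_count = 7 };
 *
 *		// handler_mutex is already held inside the callback
 *		if (ev->event == RDMA_CM_EVENT_ROUTE_RESOLVED)
 *			return rdma_connect_locked(id, &param);
 *		return 0;
 *	}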
*/ int rdma_connect_locked(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) return -EINVAL; if (!id->qp) { id_priv->qp_num = conn_param->qp_num; id_priv->srq = conn_param->srq; } if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_resolve_ib_udp(id_priv, conn_param); else ret = cma_connect_ib(id_priv, conn_param); } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = cma_connect_iw(id_priv, conn_param); } else { ret = -ENOSYS; } if (ret) goto err_state; return 0; err_state: cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); return ret; } EXPORT_SYMBOL(rdma_connect_locked); /** * rdma_connect - Initiate an active connection request. * @id: Connection identifier to connect. * @conn_param: Connection information used for connected QPs. * * Users must have resolved a route for the rdma_cm_id to connect with by having * called rdma_resolve_route before calling this routine. * * This call will either connect to a remote QP or obtain remote QP information * for unconnected rdma_cm_id's. The actual operation is based on the * rdma_cm_id's port space. */ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; mutex_lock(&id_priv->handler_mutex); ret = rdma_connect_locked(id, conn_param); mutex_unlock(&id_priv->handler_mutex); return ret; } EXPORT_SYMBOL(rdma_connect); /** * rdma_connect_ece - Initiate an active connection request with ECE data. * @id: Connection identifier to connect. * @conn_param: Connection information used for connected QPs. * @ece: ECE parameters * * See rdma_connect() explanation. */ int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, struct rdma_ucm_ece *ece) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); id_priv->ece.vendor_id = ece->vendor_id; id_priv->ece.attr_mod = ece->attr_mod; return rdma_connect(id, conn_param); } EXPORT_SYMBOL(rdma_connect_ece); static int cma_accept_ib(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct ib_cm_rep_param rep; int ret; ret = cma_modify_qp_rtr(id_priv, conn_param); if (ret) goto out; ret = cma_modify_qp_rts(id_priv, conn_param); if (ret) goto out; memset(&rep, 0, sizeof rep); rep.qp_num = id_priv->qp_num; rep.starting_psn = id_priv->seq_num; rep.private_data = conn_param->private_data; rep.private_data_len = conn_param->private_data_len; rep.responder_resources = conn_param->responder_resources; rep.initiator_depth = conn_param->initiator_depth; rep.failover_accepted = 0; rep.flow_control = conn_param->flow_control; rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); rep.srq = id_priv->srq ? 
1 : 0; rep.ece.vendor_id = id_priv->ece.vendor_id; rep.ece.attr_mod = id_priv->ece.attr_mod; trace_cm_send_rep(id_priv); ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); out: return ret; } static int cma_accept_iw(struct rdma_id_private *id_priv, struct rdma_conn_param *conn_param) { struct iw_cm_conn_param iw_param; int ret; if (!conn_param) return -EINVAL; ret = cma_modify_qp_rtr(id_priv, conn_param); if (ret) return ret; iw_param.ord = conn_param->initiator_depth; iw_param.ird = conn_param->responder_resources; iw_param.private_data = conn_param->private_data; iw_param.private_data_len = conn_param->private_data_len; if (id_priv->id.qp) iw_param.qpn = id_priv->qp_num; else iw_param.qpn = conn_param->qp_num; return iw_cm_accept(id_priv->cm_id.iw, &iw_param); } static int cma_send_sidr_rep(struct rdma_id_private *id_priv, enum ib_cm_sidr_status status, u32 qkey, const void *private_data, int private_data_len) { struct ib_cm_sidr_rep_param rep; int ret; memset(&rep, 0, sizeof rep); rep.status = status; if (status == IB_SIDR_SUCCESS) { if (qkey) ret = cma_set_qkey(id_priv, qkey); else ret = cma_set_default_qkey(id_priv); if (ret) return ret; rep.qp_num = id_priv->qp_num; rep.qkey = id_priv->qkey; rep.ece.vendor_id = id_priv->ece.vendor_id; rep.ece.attr_mod = id_priv->ece.attr_mod; } rep.private_data = private_data; rep.private_data_len = private_data_len; trace_cm_send_sidr_rep(id_priv); return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); } /** * rdma_accept - Called to accept a connection request or response. * @id: Connection identifier associated with the request. * @conn_param: Information needed to establish the connection. This must be * provided if accepting a connection request. If accepting a connection * response, this parameter must be NULL. * * Typically, this routine is only called by the listener to accept a connection * request. It must also be called on the active side of a connection if the * user is performing their own QP transitions. * * In the case of error, a reject message is sent to the remote side and the * state of the qp associated with the id is modified to error, such that any * previously posted receive buffers would be flushed. * * This function is for use by kernel ULPs and must be called from under the * handler callback. 
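 *
 * Illustrative sketch (not part of cma.c): a listener's handler accepting
 * a request; the handler name is hypothetical and QP setup via
 * rdma_create_qp() is omitted for brevity:
 *
 *	static int ulp_listen_handler(struct rdma_cm_id *id,
 *				      struct rdma_cm_event *ev)
 *	{
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth = 1,
 *		};
 *
 *		if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
 *			return rdma_accept(id, &param);
 *		return 0;
 *	}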
*/ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; lockdep_assert_held(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) return -EINVAL; if (!id->qp && conn_param) { id_priv->qp_num = conn_param->qp_num; id_priv->srq = conn_param->srq; } if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) { if (conn_param) ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, conn_param->qkey, conn_param->private_data, conn_param->private_data_len); else ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 0, NULL, 0); } else { if (conn_param) ret = cma_accept_ib(id_priv, conn_param); else ret = cma_rep_recv(id_priv); } } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = cma_accept_iw(id_priv, conn_param); } else { ret = -ENOSYS; } if (ret) goto reject; return 0; reject: cma_modify_qp_err(id_priv); rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); return ret; } EXPORT_SYMBOL(rdma_accept); int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, struct rdma_ucm_ece *ece) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); id_priv->ece.vendor_id = ece->vendor_id; id_priv->ece.attr_mod = ece->attr_mod; return rdma_accept(id, conn_param); } EXPORT_SYMBOL(rdma_accept_ece); void rdma_lock_handler(struct rdma_cm_id *id) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); mutex_lock(&id_priv->handler_mutex); } EXPORT_SYMBOL(rdma_lock_handler); void rdma_unlock_handler(struct rdma_cm_id *id) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); mutex_unlock(&id_priv->handler_mutex); } EXPORT_SYMBOL(rdma_unlock_handler); int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) { struct rdma_id_private *id_priv; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!id_priv->cm_id.ib) return -EINVAL; switch (id->device->node_type) { case RDMA_NODE_IB_CA: ret = ib_cm_notify(id_priv->cm_id.ib, event); break; default: ret = 0; break; } return ret; } EXPORT_SYMBOL(rdma_notify); int rdma_reject(struct rdma_cm_id *id, const void *private_data, u8 private_data_len, u8 reason) { struct rdma_id_private *id_priv; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!id_priv->cm_id.ib) return -EINVAL; if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) { ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, private_data, private_data_len); } else { trace_cm_send_rej(id_priv); ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, private_data, private_data_len); } } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_reject(id_priv->cm_id.iw, private_data, private_data_len); } else { ret = -ENOSYS; } return ret; } EXPORT_SYMBOL(rdma_reject); int rdma_disconnect(struct rdma_cm_id *id) { struct rdma_id_private *id_priv; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!id_priv->cm_id.ib) return -EINVAL; if (rdma_cap_ib_cm(id->device, id->port_num)) { ret = cma_modify_qp_err(id_priv); if (ret) goto out; /* Initiate or respond to a disconnect. 
*/ trace_cm_disconnect(id_priv); if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) trace_cm_sent_drep(id_priv); } else { trace_cm_sent_dreq(id_priv); } } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); } else ret = -EINVAL; out: return ret; } EXPORT_SYMBOL(rdma_disconnect); static void cma_make_mc_event(int status, struct rdma_id_private *id_priv, struct ib_sa_multicast *multicast, struct rdma_cm_event *event, struct cma_multicast *mc) { struct rdma_dev_addr *dev_addr; enum ib_gid_type gid_type; struct net_device *ndev; if (status) pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n", status); event->status = status; event->param.ud.private_data = mc->context; if (status) { event->event = RDMA_CM_EVENT_MULTICAST_ERROR; return; } dev_addr = &id_priv->id.route.addr.dev_addr; ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); gid_type = id_priv->cma_dev ->default_gid_type[id_priv->id.port_num - rdma_start_port( id_priv->cma_dev->device)]; event->event = RDMA_CM_EVENT_MULTICAST_JOIN; if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, &multicast->rec, ndev, gid_type, &event->param.ud.ah_attr)) { event->event = RDMA_CM_EVENT_MULTICAST_ERROR; goto out; } event->param.ud.qp_num = 0xFFFFFF; event->param.ud.qkey = id_priv->qkey; out: dev_put(ndev); } static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) { struct cma_multicast *mc = multicast->context; struct rdma_id_private *id_priv = mc->id_priv; struct rdma_cm_event event = {}; int ret = 0; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) goto out; ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); if (!ret) { cma_make_mc_event(status, id_priv, multicast, &event, mc); ret = cma_cm_event_handler(id_priv, &event); } rdma_destroy_ah_attr(&event.param.ud.ah_attr); WARN_ON(ret); out: mutex_unlock(&id_priv->handler_mutex); return 0; } static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr, union ib_gid *mgid) { unsigned char mc_map[MAX_ADDR_LEN]; struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct sockaddr_in *sin = (struct sockaddr_in *) addr; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; if (cma_any_addr(addr)) { memset(mgid, 0, sizeof *mgid); } else if ((addr->sa_family == AF_INET6) && ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 0xFF10A01B)) { /* IPv6 address is an SA assigned MGID. 
*/ memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); } else if (addr->sa_family == AF_IB) { memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); } else if (addr->sa_family == AF_INET6) { ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); if (id_priv->id.ps == RDMA_PS_UDP) mc_map[7] = 0x01; /* Use RDMA CM signature */ *mgid = *(union ib_gid *) (mc_map + 4); } else { ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); if (id_priv->id.ps == RDMA_PS_UDP) mc_map[7] = 0x01; /* Use RDMA CM signature */ *mgid = *(union ib_gid *) (mc_map + 4); } } static int cma_join_ib_multicast(struct rdma_id_private *id_priv, struct cma_multicast *mc) { struct ib_sa_mcmember_rec rec; struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; ib_sa_comp_mask comp_mask; int ret; ib_addr_get_mgid(dev_addr, &rec.mgid); ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, &rec.mgid, &rec); if (ret) return ret; if (!id_priv->qkey) { ret = cma_set_default_qkey(id_priv); if (ret) return ret; } cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); rec.qkey = cpu_to_be32(id_priv->qkey); rdma_addr_get_sgid(dev_addr, &rec.port_gid); rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); rec.join_state = mc->join_state; comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | IB_SA_MCMEMBER_REC_FLOW_LABEL | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; if (id_priv->id.ps == RDMA_PS_IPOIB) comp_mask |= IB_SA_MCMEMBER_REC_RATE | IB_SA_MCMEMBER_REC_RATE_SELECTOR | IB_SA_MCMEMBER_REC_MTU_SELECTOR | IB_SA_MCMEMBER_REC_MTU | IB_SA_MCMEMBER_REC_HOP_LIMIT; mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, id_priv->id.port_num, &rec, comp_mask, GFP_KERNEL, cma_ib_mc_handler, mc); return PTR_ERR_OR_ZERO(mc->sa_mc); } static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, enum ib_gid_type gid_type) { struct sockaddr_in *sin = (struct sockaddr_in *)addr; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; if (cma_any_addr(addr)) { memset(mgid, 0, sizeof *mgid); } else if (addr->sa_family == AF_INET6) { memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); } else { mgid->raw[0] = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff; mgid->raw[1] = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 
0 : 0x0e; mgid->raw[2] = 0; mgid->raw[3] = 0; mgid->raw[4] = 0; mgid->raw[5] = 0; mgid->raw[6] = 0; mgid->raw[7] = 0; mgid->raw[8] = 0; mgid->raw[9] = 0; mgid->raw[10] = 0xff; mgid->raw[11] = 0xff; *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; } } static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, struct cma_multicast *mc) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; int err = 0; struct sockaddr *addr = (struct sockaddr *)&mc->addr; struct net_device *ndev = NULL; struct ib_sa_multicast ib = {}; enum ib_gid_type gid_type; bool send_only; send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); if (cma_zero_addr(addr)) return -EINVAL; gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - rdma_start_port(id_priv->cma_dev->device)]; cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type); ib.rec.pkey = cpu_to_be16(0xffff); if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); if (!ndev) return -ENODEV; ib.rec.rate = IB_RATE_PORT_CURRENT; ib.rec.hop_limit = 1; ib.rec.mtu = iboe_get_mtu(ndev->mtu); if (addr->sa_family == AF_INET) { if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; if (!send_only) { err = cma_igmp_send(ndev, &ib.rec.mgid, true); } } } else { if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) err = -ENOTSUPP; } dev_put(ndev); if (err || !ib.rec.mtu) return err ?: -EINVAL; if (!id_priv->qkey) cma_set_default_qkey(id_priv); rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &ib.rec.port_gid); INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); queue_work(cma_wq, &mc->iboe_join.work); return 0; } int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, u8 join_state, void *context) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); struct cma_multicast *mc; int ret; /* Not supported for kernel QPs */ if (WARN_ON(id->qp)) return -EINVAL; /* ULP is calling this wrong. 
*/ if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) return -EINVAL; if (id_priv->id.qp_type != IB_QPT_UD) return -EINVAL; mc = kzalloc(sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; memcpy(&mc->addr, addr, rdma_addr_size(addr)); mc->context = context; mc->id_priv = id_priv; mc->join_state = join_state; if (rdma_protocol_roce(id->device, id->port_num)) { ret = cma_iboe_join_multicast(id_priv, mc); if (ret) goto out_err; } else if (rdma_cap_ib_mcast(id->device, id->port_num)) { ret = cma_join_ib_multicast(id_priv, mc); if (ret) goto out_err; } else { ret = -ENOSYS; goto out_err; } spin_lock(&id_priv->lock); list_add(&mc->list, &id_priv->mc_list); spin_unlock(&id_priv->lock); return 0; out_err: kfree(mc); return ret; } EXPORT_SYMBOL(rdma_join_multicast); void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) { struct rdma_id_private *id_priv; struct cma_multicast *mc; id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irq(&id_priv->lock); list_for_each_entry(mc, &id_priv->mc_list, list) { if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) continue; list_del(&mc->list); spin_unlock_irq(&id_priv->lock); WARN_ON(id_priv->cma_dev->device != id->device); destroy_mc(id_priv, mc); return; } spin_unlock_irq(&id_priv->lock); } EXPORT_SYMBOL(rdma_leave_multicast); static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) { struct rdma_dev_addr *dev_addr; struct cma_work *work; dev_addr = &id_priv->id.route.addr.dev_addr; if ((dev_addr->bound_dev_if == ndev->ifindex) && (net_eq(dev_net(ndev), dev_addr->net)) && memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { pr_info("RDMA CM addr change for ndev %s used by id %p\n", ndev->name, &id_priv->id); work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; INIT_WORK(&work->work, cma_work_handler); work->id = id_priv; work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; cma_id_get(id_priv); queue_work(cma_wq, &work->work); } return 0; } static int cma_netdev_callback(struct notifier_block *self, unsigned long event, void *ptr) { struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct cma_device *cma_dev; struct rdma_id_private *id_priv; int ret = NOTIFY_DONE; if (event != NETDEV_BONDING_FAILOVER) return NOTIFY_DONE; if (!netif_is_bond_master(ndev)) return NOTIFY_DONE; mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { ret = cma_netdev_change(ndev, id_priv); if (ret) goto out; } out: mutex_unlock(&lock); return ret; } static void cma_netevent_work_handler(struct work_struct *_work) { struct rdma_id_private *id_priv = container_of(_work, struct rdma_id_private, id.net_work); struct rdma_cm_event event = {}; mutex_lock(&id_priv->handler_mutex); if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) goto out_unlock; event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = -ETIMEDOUT; if (cma_cm_event_handler(id_priv, &event)) { __acquire(&id_priv->handler_mutex); id_priv->cm_id.ib = NULL; cma_id_put(id_priv); destroy_id_handler_unlock(id_priv); return; } out_unlock: mutex_unlock(&id_priv->handler_mutex); cma_id_put(id_priv); } static int cma_netevent_callback(struct notifier_block *self, unsigned long event, void *ctx) { struct id_table_entry *ips_node = NULL; struct rdma_id_private *current_id; struct neighbour *neigh = ctx; unsigned long flags; if (event != 
NETEVENT_NEIGH_UPDATE) return NOTIFY_DONE; spin_lock_irqsave(&id_table_lock, flags); if (neigh->tbl->family == AF_INET6) { struct sockaddr_in6 neigh_sock_6; neigh_sock_6.sin6_family = AF_INET6; neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key; ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, (struct sockaddr *)&neigh_sock_6); } else if (neigh->tbl->family == AF_INET) { struct sockaddr_in neigh_sock_4; neigh_sock_4.sin_family = AF_INET; neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key); ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex, (struct sockaddr *)&neigh_sock_4); } else goto out; if (!ips_node) goto out; list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) { if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, neigh->ha, ETH_ALEN)) continue; INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler); cma_id_get(current_id); queue_work(cma_wq, &current_id->id.net_work); } out: spin_unlock_irqrestore(&id_table_lock, flags); return NOTIFY_DONE; } static struct notifier_block cma_nb = { .notifier_call = cma_netdev_callback }; static struct notifier_block cma_netevent_cb = { .notifier_call = cma_netevent_callback }; static void cma_send_device_removal_put(struct rdma_id_private *id_priv) { struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL }; enum rdma_cm_state state; unsigned long flags; mutex_lock(&id_priv->handler_mutex); /* Record that we want to remove the device */ spin_lock_irqsave(&id_priv->lock, flags); state = id_priv->state; if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) { spin_unlock_irqrestore(&id_priv->lock, flags); mutex_unlock(&id_priv->handler_mutex); cma_id_put(id_priv); return; } id_priv->state = RDMA_CM_DEVICE_REMOVAL; spin_unlock_irqrestore(&id_priv->lock, flags); if (cma_cm_event_handler(id_priv, &event)) { /* * At this point the ULP promises it won't call * rdma_destroy_id() concurrently */ cma_id_put(id_priv); mutex_unlock(&id_priv->handler_mutex); trace_cm_id_destroy(id_priv); _destroy_id(id_priv, state); return; } mutex_unlock(&id_priv->handler_mutex); /* * If this races with destroy then the thread that first assigns state * to a destroying does the cancel. 
*/ cma_cancel_operation(id_priv, state); cma_id_put(id_priv); } static void cma_process_remove(struct cma_device *cma_dev) { mutex_lock(&lock); while (!list_empty(&cma_dev->id_list)) { struct rdma_id_private *id_priv = list_first_entry( &cma_dev->id_list, struct rdma_id_private, device_item); list_del_init(&id_priv->listen_item); list_del_init(&id_priv->device_item); cma_id_get(id_priv); mutex_unlock(&lock); cma_send_device_removal_put(id_priv); mutex_lock(&lock); } mutex_unlock(&lock); cma_dev_put(cma_dev); wait_for_completion(&cma_dev->comp); } static bool cma_supported(struct ib_device *device) { u32 i; rdma_for_each_port(device, i) { if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i)) return true; } return false; } static int cma_add_one(struct ib_device *device) { struct rdma_id_private *to_destroy; struct cma_device *cma_dev; struct rdma_id_private *id_priv; unsigned long supported_gids = 0; int ret; u32 i; if (!cma_supported(device)) return -EOPNOTSUPP; cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL); if (!cma_dev) return -ENOMEM; cma_dev->device = device; cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, sizeof(*cma_dev->default_gid_type), GFP_KERNEL); if (!cma_dev->default_gid_type) { ret = -ENOMEM; goto free_cma_dev; } cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, sizeof(*cma_dev->default_roce_tos), GFP_KERNEL); if (!cma_dev->default_roce_tos) { ret = -ENOMEM; goto free_gid_type; } rdma_for_each_port (device, i) { supported_gids = roce_gid_type_mask_support(device, i); WARN_ON(!supported_gids); if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) cma_dev->default_gid_type[i - rdma_start_port(device)] = CMA_PREFERRED_ROCE_GID_TYPE; else cma_dev->default_gid_type[i - rdma_start_port(device)] = find_first_bit(&supported_gids, BITS_PER_LONG); cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; } init_completion(&cma_dev->comp); refcount_set(&cma_dev->refcount, 1); INIT_LIST_HEAD(&cma_dev->id_list); ib_set_client_data(device, &cma_client, cma_dev); mutex_lock(&lock); list_add_tail(&cma_dev->list, &dev_list); list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); if (ret) goto free_listen; } mutex_unlock(&lock); trace_cm_add_one(device); return 0; free_listen: list_del(&cma_dev->list); mutex_unlock(&lock); /* cma_process_remove() will delete to_destroy */ cma_process_remove(cma_dev); kfree(cma_dev->default_roce_tos); free_gid_type: kfree(cma_dev->default_gid_type); free_cma_dev: kfree(cma_dev); return ret; } static void cma_remove_one(struct ib_device *device, void *client_data) { struct cma_device *cma_dev = client_data; trace_cm_remove_one(device); mutex_lock(&lock); list_del(&cma_dev->list); mutex_unlock(&lock); cma_process_remove(cma_dev); kfree(cma_dev->default_roce_tos); kfree(cma_dev->default_gid_type); kfree(cma_dev); } static int cma_init_net(struct net *net) { struct cma_pernet *pernet = cma_pernet(net); xa_init(&pernet->tcp_ps); xa_init(&pernet->udp_ps); xa_init(&pernet->ipoib_ps); xa_init(&pernet->ib_ps); return 0; } static void cma_exit_net(struct net *net) { struct cma_pernet *pernet = cma_pernet(net); WARN_ON(!xa_empty(&pernet->tcp_ps)); WARN_ON(!xa_empty(&pernet->udp_ps)); WARN_ON(!xa_empty(&pernet->ipoib_ps)); WARN_ON(!xa_empty(&pernet->ib_ps)); } static struct pernet_operations cma_pernet_operations = { .init = cma_init_net, .exit = cma_exit_net, .id = &cma_pernet_id, .size = sizeof(struct cma_pernet), }; static int __init cma_init(void) { int ret; /* * There 
is a rare lock ordering dependency in cma_netdev_callback() * that only happens when bonding is enabled. Teach lockdep that rtnl * must never be nested under lock so it can find these without having * to test with bonding. */ if (IS_ENABLED(CONFIG_LOCKDEP)) { rtnl_lock(); mutex_lock(&lock); mutex_unlock(&lock); rtnl_unlock(); } cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); if (!cma_wq) return -ENOMEM; ret = register_pernet_subsys(&cma_pernet_operations); if (ret) goto err_wq; ib_sa_register_client(&sa_client); register_netdevice_notifier(&cma_nb); register_netevent_notifier(&cma_netevent_cb); ret = ib_register_client(&cma_client); if (ret) goto err; ret = cma_configfs_init(); if (ret) goto err_ib; return 0; err_ib: ib_unregister_client(&cma_client); err: unregister_netevent_notifier(&cma_netevent_cb); unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); unregister_pernet_subsys(&cma_pernet_operations); err_wq: destroy_workqueue(cma_wq); return ret; } static void __exit cma_cleanup(void) { cma_configfs_exit(); ib_unregister_client(&cma_client); unregister_netevent_notifier(&cma_netevent_cb); unregister_netdevice_notifier(&cma_nb); ib_sa_unregister_client(&sa_client); unregister_pernet_subsys(&cma_pernet_operations); destroy_workqueue(cma_wq); } module_init(cma_init); module_exit(cma_cleanup);
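/*
 * Illustrative sketch (not part of cma.c): the active-side call sequence that
 * the rdma_connect()/rdma_connect_locked() comments above describe.  Address
 * and route resolution complete asynchronously through the CM event handler,
 * which runs with handler_mutex held, so connecting from inside the handler
 * must use rdma_connect_locked().  All names below (my_ulp_*) and the
 * timeouts are assumptions for illustration, not part of the RDMA CM API.
 */
#include <linux/err.h>
#include <linux/printk.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

static int my_ulp_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	struct rdma_conn_param param = { .retry_count = 7 };

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/* A real ULP would typically create its QP around here. */
		return rdma_resolve_route(id, 2000 /* ms, illustrative */);
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		/* handler_mutex is already held in this context. */
		return rdma_connect_locked(id, &param);
	case RDMA_CM_EVENT_ESTABLISHED:
		pr_info("my_ulp: connection established\n");
		return 0;
	default:
		/* Returning non-zero would make the CM core destroy the id. */
		return 0;
	}
}

/* Kick off the asynchronous connect; further events arrive in the handler. */
static int my_ulp_start_connect(struct sockaddr *src, struct sockaddr *dst)
{
	struct rdma_cm_id *id;

	id = rdma_create_id(&init_net, my_ulp_cm_handler, NULL,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);
	return rdma_resolve_addr(id, src, dst, 2000 /* ms, illustrative */);
}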
// SPDX-License-Identifier: GPL-2.0-only /* * Edirol UA-101/UA-1000 driver * Copyright (c) Clemens Ladisch <clemens@ladisch.de> */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "../usbaudio.h" #include "../midi.h" MODULE_DESCRIPTION("Edirol UA-101/1000 driver"); MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL v2"); /* * Should not be lower than the minimum scheduling delay of the host * controller. Some Intel controllers need more than one frame; as long as * that driver doesn't tell us about this, use 1.5 frames just to be sure. */ #define MIN_QUEUE_LENGTH 12 /* Somewhat random. */ #define MAX_QUEUE_LENGTH 30 /* * This magic value optimizes memory usage efficiency for the UA-101's packet * sizes at all sample rates, taking into account the stupid cache pool sizes * that usb_alloc_coherent() uses. 
*/ #define DEFAULT_QUEUE_LENGTH 21 #define MAX_PACKET_SIZE 672 /* hardware specific */ #define MAX_MEMORY_BUFFERS DIV_ROUND_UP(MAX_QUEUE_LENGTH, \ PAGE_SIZE / MAX_PACKET_SIZE) static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static unsigned int queue_length = 21; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "card index"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string"); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "enable card"); module_param(queue_length, uint, 0644); MODULE_PARM_DESC(queue_length, "USB queue length in microframes, " __stringify(MIN_QUEUE_LENGTH)"-"__stringify(MAX_QUEUE_LENGTH)); enum { INTF_PLAYBACK, INTF_CAPTURE, INTF_MIDI, INTF_COUNT }; /* bits in struct ua101::states */ enum { USB_CAPTURE_RUNNING, USB_PLAYBACK_RUNNING, ALSA_CAPTURE_OPEN, ALSA_PLAYBACK_OPEN, ALSA_CAPTURE_RUNNING, ALSA_PLAYBACK_RUNNING, CAPTURE_URB_COMPLETED, PLAYBACK_URB_COMPLETED, DISCONNECTED, }; struct ua101 { struct usb_device *dev; struct snd_card *card; struct usb_interface *intf[INTF_COUNT]; int card_index; struct snd_pcm *pcm; struct list_head midi_list; u64 format_bit; unsigned int rate; unsigned int packets_per_second; spinlock_t lock; struct mutex mutex; unsigned long states; /* FIFO to synchronize playback rate to capture rate */ unsigned int rate_feedback_start; unsigned int rate_feedback_count; u8 rate_feedback[MAX_QUEUE_LENGTH]; struct list_head ready_playback_urbs; struct work_struct playback_work; wait_queue_head_t alsa_capture_wait; wait_queue_head_t rate_feedback_wait; wait_queue_head_t alsa_playback_wait; struct ua101_stream { struct snd_pcm_substream *substream; unsigned int usb_pipe; unsigned int channels; unsigned int frame_bytes; unsigned int max_packet_bytes; unsigned int period_pos; unsigned int buffer_pos; unsigned int queue_length; struct ua101_urb { struct urb urb; struct usb_iso_packet_descriptor iso_frame_desc[1]; struct list_head ready_list; } *urbs[MAX_QUEUE_LENGTH]; struct { unsigned int size; void *addr; dma_addr_t dma; } buffers[MAX_MEMORY_BUFFERS]; } capture, playback; }; static DEFINE_MUTEX(devices_mutex); static unsigned int devices_used; static struct usb_driver ua101_driver; static void abort_alsa_playback(struct ua101 *ua); static void abort_alsa_capture(struct ua101 *ua); static const char *usb_error_string(int err) { switch (err) { case -ENODEV: return "no device"; case -ENOENT: return "endpoint not enabled"; case -EPIPE: return "endpoint stalled"; case -ENOSPC: return "not enough bandwidth"; case -ESHUTDOWN: return "device disabled"; case -EHOSTUNREACH: return "device suspended"; case -EINVAL: case -EAGAIN: case -EFBIG: case -EMSGSIZE: return "internal error"; default: return "unknown error"; } } static void abort_usb_capture(struct ua101 *ua) { if (test_and_clear_bit(USB_CAPTURE_RUNNING, &ua->states)) { wake_up(&ua->alsa_capture_wait); wake_up(&ua->rate_feedback_wait); } } static void abort_usb_playback(struct ua101 *ua) { if (test_and_clear_bit(USB_PLAYBACK_RUNNING, &ua->states)) wake_up(&ua->alsa_playback_wait); } static void playback_urb_complete(struct urb *usb_urb) { struct ua101_urb *urb = (struct ua101_urb *)usb_urb; struct ua101 *ua = urb->urb.context; unsigned long flags; if (unlikely(urb->urb.status == -ENOENT || /* unlinked */ urb->urb.status == -ENODEV || /* device removed */ urb->urb.status == -ECONNRESET || /* unlinked */ urb->urb.status == -ESHUTDOWN)) { 
/* device disabled */ abort_usb_playback(ua); abort_alsa_playback(ua); return; } if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) { /* append URB to FIFO */ spin_lock_irqsave(&ua->lock, flags); list_add_tail(&urb->ready_list, &ua->ready_playback_urbs); if (ua->rate_feedback_count > 0) queue_work(system_highpri_wq, &ua->playback_work); ua->playback.substream->runtime->delay -= urb->urb.iso_frame_desc[0].length / ua->playback.frame_bytes; spin_unlock_irqrestore(&ua->lock, flags); } } static void first_playback_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; urb->complete = playback_urb_complete; playback_urb_complete(urb); set_bit(PLAYBACK_URB_COMPLETED, &ua->states); wake_up(&ua->alsa_playback_wait); } /* copy data from the ALSA ring buffer into the URB buffer */ static bool copy_playback_data(struct ua101_stream *stream, struct urb *urb, unsigned int frames) { struct snd_pcm_runtime *runtime; unsigned int frame_bytes, frames1; const u8 *source; runtime = stream->substream->runtime; frame_bytes = stream->frame_bytes; source = runtime->dma_area + stream->buffer_pos * frame_bytes; if (stream->buffer_pos + frames <= runtime->buffer_size) { memcpy(urb->transfer_buffer, source, frames * frame_bytes); } else { /* wrap around at end of ring buffer */ frames1 = runtime->buffer_size - stream->buffer_pos; memcpy(urb->transfer_buffer, source, frames1 * frame_bytes); memcpy(urb->transfer_buffer + frames1 * frame_bytes, runtime->dma_area, (frames - frames1) * frame_bytes); } stream->buffer_pos += frames; if (stream->buffer_pos >= runtime->buffer_size) stream->buffer_pos -= runtime->buffer_size; stream->period_pos += frames; if (stream->period_pos >= runtime->period_size) { stream->period_pos -= runtime->period_size; return true; } return false; } static inline void add_with_wraparound(struct ua101 *ua, unsigned int *value, unsigned int add) { *value += add; if (*value >= ua->playback.queue_length) *value -= ua->playback.queue_length; } static void playback_work(struct work_struct *work) { struct ua101 *ua = container_of(work, struct ua101, playback_work); unsigned long flags; unsigned int frames; struct ua101_urb *urb; bool do_period_elapsed = false; int err; if (unlikely(!test_bit(USB_PLAYBACK_RUNNING, &ua->states))) return; /* * Synchronizing the playback rate to the capture rate is done by using * the same sequence of packet sizes for both streams. * Submitting a playback URB therefore requires both a ready URB and * the size of the corresponding capture packet, i.e., both playback * and capture URBs must have been completed. Since the USB core does * not guarantee that playback and capture complete callbacks are * called alternately, we use two FIFOs for packet sizes and read URBs; * submitting playback URBs is possible as long as both FIFOs are * nonempty. 
*/ spin_lock_irqsave(&ua->lock, flags); while (ua->rate_feedback_count > 0 && !list_empty(&ua->ready_playback_urbs)) { /* take packet size out of FIFO */ frames = ua->rate_feedback[ua->rate_feedback_start]; add_with_wraparound(ua, &ua->rate_feedback_start, 1); ua->rate_feedback_count--; /* take URB out of FIFO */ urb = list_first_entry(&ua->ready_playback_urbs, struct ua101_urb, ready_list); list_del(&urb->ready_list); /* fill packet with data or silence */ urb->urb.iso_frame_desc[0].length = frames * ua->playback.frame_bytes; if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) do_period_elapsed |= copy_playback_data(&ua->playback, &urb->urb, frames); else memset(urb->urb.transfer_buffer, 0, urb->urb.iso_frame_desc[0].length); /* and off you go ... */ err = usb_submit_urb(&urb->urb, GFP_ATOMIC); if (unlikely(err < 0)) { spin_unlock_irqrestore(&ua->lock, flags); abort_usb_playback(ua); abort_alsa_playback(ua); dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); return; } ua->playback.substream->runtime->delay += frames; } spin_unlock_irqrestore(&ua->lock, flags); if (do_period_elapsed) snd_pcm_period_elapsed(ua->playback.substream); } /* copy data from the URB buffer into the ALSA ring buffer */ static bool copy_capture_data(struct ua101_stream *stream, struct urb *urb, unsigned int frames) { struct snd_pcm_runtime *runtime; unsigned int frame_bytes, frames1; u8 *dest; runtime = stream->substream->runtime; frame_bytes = stream->frame_bytes; dest = runtime->dma_area + stream->buffer_pos * frame_bytes; if (stream->buffer_pos + frames <= runtime->buffer_size) { memcpy(dest, urb->transfer_buffer, frames * frame_bytes); } else { /* wrap around at end of ring buffer */ frames1 = runtime->buffer_size - stream->buffer_pos; memcpy(dest, urb->transfer_buffer, frames1 * frame_bytes); memcpy(runtime->dma_area, urb->transfer_buffer + frames1 * frame_bytes, (frames - frames1) * frame_bytes); } stream->buffer_pos += frames; if (stream->buffer_pos >= runtime->buffer_size) stream->buffer_pos -= runtime->buffer_size; stream->period_pos += frames; if (stream->period_pos >= runtime->period_size) { stream->period_pos -= runtime->period_size; return true; } return false; } static void capture_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; struct ua101_stream *stream = &ua->capture; unsigned long flags; unsigned int frames, write_ptr; bool do_period_elapsed; int err; if (unlikely(urb->status == -ENOENT || /* unlinked */ urb->status == -ENODEV || /* device removed */ urb->status == -ECONNRESET || /* unlinked */ urb->status == -ESHUTDOWN)) /* device disabled */ goto stream_stopped; if (urb->status >= 0 && urb->iso_frame_desc[0].status >= 0) frames = urb->iso_frame_desc[0].actual_length / stream->frame_bytes; else frames = 0; spin_lock_irqsave(&ua->lock, flags); if (frames > 0 && test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) do_period_elapsed = copy_capture_data(stream, urb, frames); else do_period_elapsed = false; if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) { err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err < 0)) { spin_unlock_irqrestore(&ua->lock, flags); dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); goto stream_stopped; } /* append packet size to FIFO */ write_ptr = ua->rate_feedback_start; add_with_wraparound(ua, &write_ptr, ua->rate_feedback_count); ua->rate_feedback[write_ptr] = frames; if (ua->rate_feedback_count < ua->playback.queue_length) { ua->rate_feedback_count++; if (ua->rate_feedback_count == 
ua->playback.queue_length) wake_up(&ua->rate_feedback_wait); } else { /* * Ring buffer overflow; this happens when the playback * stream is not running. Throw away the oldest entry, * so that the playback stream, when it starts, sees * the most recent packet sizes. */ add_with_wraparound(ua, &ua->rate_feedback_start, 1); } if (test_bit(USB_PLAYBACK_RUNNING, &ua->states) && !list_empty(&ua->ready_playback_urbs)) queue_work(system_highpri_wq, &ua->playback_work); } spin_unlock_irqrestore(&ua->lock, flags); if (do_period_elapsed) snd_pcm_period_elapsed(stream->substream); return; stream_stopped: abort_usb_playback(ua); abort_usb_capture(ua); abort_alsa_playback(ua); abort_alsa_capture(ua); } static void first_capture_urb_complete(struct urb *urb) { struct ua101 *ua = urb->context; urb->complete = capture_urb_complete; capture_urb_complete(urb); set_bit(CAPTURE_URB_COMPLETED, &ua->states); wake_up(&ua->alsa_capture_wait); } static int submit_stream_urbs(struct ua101 *ua, struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) { int err = usb_submit_urb(&stream->urbs[i]->urb, GFP_KERNEL); if (err < 0) { dev_err(&ua->dev->dev, "USB request error %d: %s\n", err, usb_error_string(err)); return err; } } return 0; } static void kill_stream_urbs(struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) if (stream->urbs[i]) usb_kill_urb(&stream->urbs[i]->urb); } static int enable_iso_interface(struct ua101 *ua, unsigned int intf_index) { struct usb_host_interface *alts; alts = ua->intf[intf_index]->cur_altsetting; if (alts->desc.bAlternateSetting != 1) { int err = usb_set_interface(ua->dev, alts->desc.bInterfaceNumber, 1); if (err < 0) { dev_err(&ua->dev->dev, "cannot initialize interface; error %d: %s\n", err, usb_error_string(err)); return err; } } return 0; } static void disable_iso_interface(struct ua101 *ua, unsigned int intf_index) { struct usb_host_interface *alts; if (!ua->intf[intf_index]) return; alts = ua->intf[intf_index]->cur_altsetting; if (alts->desc.bAlternateSetting != 0) { int err = usb_set_interface(ua->dev, alts->desc.bInterfaceNumber, 0); if (err < 0 && !test_bit(DISCONNECTED, &ua->states)) dev_warn(&ua->dev->dev, "interface reset failed; error %d: %s\n", err, usb_error_string(err)); } } static void stop_usb_capture(struct ua101 *ua) { clear_bit(USB_CAPTURE_RUNNING, &ua->states); kill_stream_urbs(&ua->capture); disable_iso_interface(ua, INTF_CAPTURE); } static int start_usb_capture(struct ua101 *ua) { int err; if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) return 0; kill_stream_urbs(&ua->capture); err = enable_iso_interface(ua, INTF_CAPTURE); if (err < 0) return err; clear_bit(CAPTURE_URB_COMPLETED, &ua->states); ua->capture.urbs[0]->urb.complete = first_capture_urb_complete; ua->rate_feedback_start = 0; ua->rate_feedback_count = 0; set_bit(USB_CAPTURE_RUNNING, &ua->states); err = submit_stream_urbs(ua, &ua->capture); if (err < 0) stop_usb_capture(ua); return err; } static void stop_usb_playback(struct ua101 *ua) { clear_bit(USB_PLAYBACK_RUNNING, &ua->states); kill_stream_urbs(&ua->playback); cancel_work_sync(&ua->playback_work); disable_iso_interface(ua, INTF_PLAYBACK); } static int start_usb_playback(struct ua101 *ua) { unsigned int i, frames; struct urb *urb; int err = 0; if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return 0; kill_stream_urbs(&ua->playback); cancel_work_sync(&ua->playback_work); 
err = enable_iso_interface(ua, INTF_PLAYBACK); if (err < 0) return err; clear_bit(PLAYBACK_URB_COMPLETED, &ua->states); ua->playback.urbs[0]->urb.complete = first_playback_urb_complete; spin_lock_irq(&ua->lock); INIT_LIST_HEAD(&ua->ready_playback_urbs); spin_unlock_irq(&ua->lock); /* * We submit the initial URBs all at once, so we have to wait for the * packet size FIFO to be full. */ wait_event(ua->rate_feedback_wait, ua->rate_feedback_count >= ua->playback.queue_length || !test_bit(USB_CAPTURE_RUNNING, &ua->states) || test_bit(DISCONNECTED, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) { stop_usb_playback(ua); return -ENODEV; } if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) { stop_usb_playback(ua); return -EIO; } for (i = 0; i < ua->playback.queue_length; ++i) { /* all initial URBs contain silence */ spin_lock_irq(&ua->lock); frames = ua->rate_feedback[ua->rate_feedback_start]; add_with_wraparound(ua, &ua->rate_feedback_start, 1); ua->rate_feedback_count--; spin_unlock_irq(&ua->lock); urb = &ua->playback.urbs[i]->urb; urb->iso_frame_desc[0].length = frames * ua->playback.frame_bytes; memset(urb->transfer_buffer, 0, urb->iso_frame_desc[0].length); } set_bit(USB_PLAYBACK_RUNNING, &ua->states); err = submit_stream_urbs(ua, &ua->playback); if (err < 0) stop_usb_playback(ua); return err; } static void abort_alsa_capture(struct ua101 *ua) { if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) snd_pcm_stop_xrun(ua->capture.substream); } static void abort_alsa_playback(struct ua101 *ua) { if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) snd_pcm_stop_xrun(ua->playback.substream); } static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream, unsigned int channels) { int err; substream->runtime->hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_FIFO_IN_FRAMES; substream->runtime->hw.formats = ua->format_bit; substream->runtime->hw.rates = snd_pcm_rate_to_rate_bit(ua->rate); substream->runtime->hw.rate_min = ua->rate; substream->runtime->hw.rate_max = ua->rate; substream->runtime->hw.channels_min = channels; substream->runtime->hw.channels_max = channels; substream->runtime->hw.buffer_bytes_max = 45000 * 1024; substream->runtime->hw.period_bytes_min = 1; substream->runtime->hw.period_bytes_max = UINT_MAX; substream->runtime->hw.periods_min = 2; substream->runtime->hw.periods_max = UINT_MAX; err = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 1500000 / ua->packets_per_second, UINT_MAX); if (err < 0) return err; err = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24); return err; } static int capture_pcm_open(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; ua->capture.substream = substream; err = set_stream_hw(ua, substream, ua->capture.channels); if (err < 0) return err; substream->runtime->hw.fifo_size = DIV_ROUND_CLOSEST(ua->rate, ua->packets_per_second); substream->runtime->delay = substream->runtime->hw.fifo_size; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) set_bit(ALSA_CAPTURE_OPEN, &ua->states); mutex_unlock(&ua->mutex); return err; } static int playback_pcm_open(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; ua->playback.substream = substream; err = set_stream_hw(ua, substream, ua->playback.channels); if (err < 0) return err; substream->runtime->hw.fifo_size = DIV_ROUND_CLOSEST(ua->rate * 
ua->playback.queue_length, ua->packets_per_second); mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err < 0) goto error; err = start_usb_playback(ua); if (err < 0) { if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states)) stop_usb_capture(ua); goto error; } set_bit(ALSA_PLAYBACK_OPEN, &ua->states); error: mutex_unlock(&ua->mutex); return err; } static int capture_pcm_close(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; mutex_lock(&ua->mutex); clear_bit(ALSA_CAPTURE_OPEN, &ua->states); if (!test_bit(ALSA_PLAYBACK_OPEN, &ua->states)) stop_usb_capture(ua); mutex_unlock(&ua->mutex); return 0; } static int playback_pcm_close(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; mutex_lock(&ua->mutex); stop_usb_playback(ua); clear_bit(ALSA_PLAYBACK_OPEN, &ua->states); if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states)) stop_usb_capture(ua); mutex_unlock(&ua->mutex); return 0; } static int capture_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); mutex_unlock(&ua->mutex); return err; } static int playback_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) err = start_usb_playback(ua); mutex_unlock(&ua->mutex); return err; } static int capture_pcm_prepare(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); mutex_unlock(&ua->mutex); if (err < 0) return err; /* * The EHCI driver schedules the first packet of an iso stream at 10 ms * in the future, i.e., no data is actually captured for that long. * Take the wait here so that the stream is known to be actually * running when the start trigger has been called. 
*/ wait_event(ua->alsa_capture_wait, test_bit(CAPTURE_URB_COMPLETED, &ua->states) || !test_bit(USB_CAPTURE_RUNNING, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) return -EIO; ua->capture.period_pos = 0; ua->capture.buffer_pos = 0; return 0; } static int playback_pcm_prepare(struct snd_pcm_substream *substream) { struct ua101 *ua = substream->private_data; int err; mutex_lock(&ua->mutex); err = start_usb_capture(ua); if (err >= 0) err = start_usb_playback(ua); mutex_unlock(&ua->mutex); if (err < 0) return err; /* see the comment in capture_pcm_prepare() */ wait_event(ua->alsa_playback_wait, test_bit(PLAYBACK_URB_COMPLETED, &ua->states) || !test_bit(USB_PLAYBACK_RUNNING, &ua->states)); if (test_bit(DISCONNECTED, &ua->states)) return -ENODEV; if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return -EIO; substream->runtime->delay = 0; ua->playback.period_pos = 0; ua->playback.buffer_pos = 0; return 0; } static int capture_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct ua101 *ua = substream->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) return -EIO; set_bit(ALSA_CAPTURE_RUNNING, &ua->states); return 0; case SNDRV_PCM_TRIGGER_STOP: clear_bit(ALSA_CAPTURE_RUNNING, &ua->states); return 0; default: return -EINVAL; } } static int playback_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct ua101 *ua = substream->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states)) return -EIO; set_bit(ALSA_PLAYBACK_RUNNING, &ua->states); return 0; case SNDRV_PCM_TRIGGER_STOP: clear_bit(ALSA_PLAYBACK_RUNNING, &ua->states); return 0; default: return -EINVAL; } } static inline snd_pcm_uframes_t ua101_pcm_pointer(struct ua101 *ua, struct ua101_stream *stream) { unsigned long flags; unsigned int pos; spin_lock_irqsave(&ua->lock, flags); pos = stream->buffer_pos; spin_unlock_irqrestore(&ua->lock, flags); return pos; } static snd_pcm_uframes_t capture_pcm_pointer(struct snd_pcm_substream *subs) { struct ua101 *ua = subs->private_data; return ua101_pcm_pointer(ua, &ua->capture); } static snd_pcm_uframes_t playback_pcm_pointer(struct snd_pcm_substream *subs) { struct ua101 *ua = subs->private_data; return ua101_pcm_pointer(ua, &ua->playback); } static const struct snd_pcm_ops capture_pcm_ops = { .open = capture_pcm_open, .close = capture_pcm_close, .hw_params = capture_pcm_hw_params, .prepare = capture_pcm_prepare, .trigger = capture_pcm_trigger, .pointer = capture_pcm_pointer, }; static const struct snd_pcm_ops playback_pcm_ops = { .open = playback_pcm_open, .close = playback_pcm_close, .hw_params = playback_pcm_hw_params, .prepare = playback_pcm_prepare, .trigger = playback_pcm_trigger, .pointer = playback_pcm_pointer, }; static const struct uac_format_type_i_discrete_descriptor * find_format_descriptor(struct usb_interface *interface) { struct usb_host_interface *alt; u8 *extra; int extralen; if (interface->num_altsetting != 2) { dev_err(&interface->dev, "invalid num_altsetting\n"); return NULL; } alt = &interface->altsetting[0]; if (alt->desc.bNumEndpoints != 0) { dev_err(&interface->dev, "invalid bNumEndpoints\n"); return NULL; } alt = &interface->altsetting[1]; if (alt->desc.bNumEndpoints != 1) { dev_err(&interface->dev, "invalid bNumEndpoints\n"); return NULL; } extra = alt->extra; extralen = alt->extralen; while (extralen >= sizeof(struct usb_descriptor_header)) { struct 
uac_format_type_i_discrete_descriptor *desc; desc = (struct uac_format_type_i_discrete_descriptor *)extra; if (desc->bLength > extralen) { dev_err(&interface->dev, "descriptor overflow\n"); return NULL; } if (desc->bLength == UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1) && desc->bDescriptorType == USB_DT_CS_INTERFACE && desc->bDescriptorSubtype == UAC_FORMAT_TYPE) { if (desc->bFormatType != UAC_FORMAT_TYPE_I_PCM || desc->bSamFreqType != 1) { dev_err(&interface->dev, "invalid format type\n"); return NULL; } return desc; } extralen -= desc->bLength; extra += desc->bLength; } dev_err(&interface->dev, "sample format descriptor not found\n"); return NULL; } static int detect_usb_format(struct ua101 *ua) { const struct uac_format_type_i_discrete_descriptor *fmt_capture; const struct uac_format_type_i_discrete_descriptor *fmt_playback; const struct usb_endpoint_descriptor *epd; unsigned int rate2; fmt_capture = find_format_descriptor(ua->intf[INTF_CAPTURE]); fmt_playback = find_format_descriptor(ua->intf[INTF_PLAYBACK]); if (!fmt_capture || !fmt_playback) return -ENXIO; switch (fmt_capture->bSubframeSize) { case 3: ua->format_bit = SNDRV_PCM_FMTBIT_S24_3LE; break; case 4: ua->format_bit = SNDRV_PCM_FMTBIT_S32_LE; break; default: dev_err(&ua->dev->dev, "sample width is not 24 or 32 bits\n"); return -ENXIO; } if (fmt_capture->bSubframeSize != fmt_playback->bSubframeSize) { dev_err(&ua->dev->dev, "playback/capture sample widths do not match\n"); return -ENXIO; } if (fmt_capture->bBitResolution != 24 || fmt_playback->bBitResolution != 24) { dev_err(&ua->dev->dev, "sample width is not 24 bits\n"); return -ENXIO; } ua->rate = combine_triple(fmt_capture->tSamFreq[0]); rate2 = combine_triple(fmt_playback->tSamFreq[0]); if (ua->rate != rate2) { dev_err(&ua->dev->dev, "playback/capture rates do not match: %u/%u\n", rate2, ua->rate); return -ENXIO; } switch (ua->dev->speed) { case USB_SPEED_FULL: ua->packets_per_second = 1000; break; case USB_SPEED_HIGH: ua->packets_per_second = 8000; break; default: dev_err(&ua->dev->dev, "unknown device speed\n"); return -ENXIO; } ua->capture.channels = fmt_capture->bNrChannels; ua->playback.channels = fmt_playback->bNrChannels; ua->capture.frame_bytes = fmt_capture->bSubframeSize * ua->capture.channels; ua->playback.frame_bytes = fmt_playback->bSubframeSize * ua->playback.channels; epd = &ua->intf[INTF_CAPTURE]->altsetting[1].endpoint[0].desc; if (!usb_endpoint_is_isoc_in(epd) || usb_endpoint_maxp(epd) == 0) { dev_err(&ua->dev->dev, "invalid capture endpoint\n"); return -ENXIO; } ua->capture.usb_pipe = usb_rcvisocpipe(ua->dev, usb_endpoint_num(epd)); ua->capture.max_packet_bytes = usb_endpoint_maxp(epd); epd = &ua->intf[INTF_PLAYBACK]->altsetting[1].endpoint[0].desc; if (!usb_endpoint_is_isoc_out(epd) || usb_endpoint_maxp(epd) == 0) { dev_err(&ua->dev->dev, "invalid playback endpoint\n"); return -ENXIO; } ua->playback.usb_pipe = usb_sndisocpipe(ua->dev, usb_endpoint_num(epd)); ua->playback.max_packet_bytes = usb_endpoint_maxp(epd); return 0; } static int alloc_stream_buffers(struct ua101 *ua, struct ua101_stream *stream) { unsigned int remaining_packets, packets, packets_per_page, i; size_t size; stream->queue_length = queue_length; stream->queue_length = max(stream->queue_length, (unsigned int)MIN_QUEUE_LENGTH); stream->queue_length = min(stream->queue_length, (unsigned int)MAX_QUEUE_LENGTH); /* * The cache pool sizes used by usb_alloc_coherent() (128, 512, 2048) are * quite bad when used with the packet sizes of this device (e.g. 280, * 520, 624). 
Therefore, we allocate and subdivide entire pages, using * a smaller buffer only for the last chunk. */ remaining_packets = stream->queue_length; packets_per_page = PAGE_SIZE / stream->max_packet_bytes; for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i) { packets = min(remaining_packets, packets_per_page); size = packets * stream->max_packet_bytes; stream->buffers[i].addr = usb_alloc_coherent(ua->dev, size, GFP_KERNEL, &stream->buffers[i].dma); if (!stream->buffers[i].addr) return -ENOMEM; stream->buffers[i].size = size; remaining_packets -= packets; if (!remaining_packets) break; } if (remaining_packets) { dev_err(&ua->dev->dev, "too many packets\n"); return -ENXIO; } return 0; } static void free_stream_buffers(struct ua101 *ua, struct ua101_stream *stream) { unsigned int i; for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i) usb_free_coherent(ua->dev, stream->buffers[i].size, stream->buffers[i].addr, stream->buffers[i].dma); } static int alloc_stream_urbs(struct ua101 *ua, struct ua101_stream *stream, void (*urb_complete)(struct urb *)) { unsigned max_packet_size = stream->max_packet_bytes; struct ua101_urb *urb; unsigned int b, u = 0; for (b = 0; b < ARRAY_SIZE(stream->buffers); ++b) { unsigned int size = stream->buffers[b].size; u8 *addr = stream->buffers[b].addr; dma_addr_t dma = stream->buffers[b].dma; while (size >= max_packet_size) { if (u >= stream->queue_length) goto bufsize_error; urb = kmalloc(sizeof(*urb), GFP_KERNEL); if (!urb) return -ENOMEM; usb_init_urb(&urb->urb); urb->urb.dev = ua->dev; urb->urb.pipe = stream->usb_pipe; urb->urb.transfer_flags = URB_NO_TRANSFER_DMA_MAP; urb->urb.transfer_buffer = addr; urb->urb.transfer_dma = dma; urb->urb.transfer_buffer_length = max_packet_size; urb->urb.number_of_packets = 1; urb->urb.interval = 1; urb->urb.context = ua; urb->urb.complete = urb_complete; urb->urb.iso_frame_desc[0].offset = 0; urb->urb.iso_frame_desc[0].length = max_packet_size; stream->urbs[u++] = urb; size -= max_packet_size; addr += max_packet_size; dma += max_packet_size; } } if (u == stream->queue_length) return 0; bufsize_error: dev_err(&ua->dev->dev, "internal buffer size error\n"); return -ENXIO; } static void free_stream_urbs(struct ua101_stream *stream) { unsigned int i; for (i = 0; i < stream->queue_length; ++i) { kfree(stream->urbs[i]); stream->urbs[i] = NULL; } } static void free_usb_related_resources(struct ua101 *ua, struct usb_interface *interface) { unsigned int i; struct usb_interface *intf; mutex_lock(&ua->mutex); free_stream_urbs(&ua->capture); free_stream_urbs(&ua->playback); mutex_unlock(&ua->mutex); free_stream_buffers(ua, &ua->capture); free_stream_buffers(ua, &ua->playback); for (i = 0; i < ARRAY_SIZE(ua->intf); ++i) { mutex_lock(&ua->mutex); intf = ua->intf[i]; ua->intf[i] = NULL; mutex_unlock(&ua->mutex); if (intf) { usb_set_intfdata(intf, NULL); if (intf != interface) usb_driver_release_interface(&ua101_driver, intf); } } } static void ua101_card_free(struct snd_card *card) { struct ua101 *ua = card->private_data; mutex_destroy(&ua->mutex); } static int ua101_probe(struct usb_interface *interface, const struct usb_device_id *usb_id) { static const struct snd_usb_midi_endpoint_info midi_ep = { .out_cables = 0x0001, .in_cables = 0x0001 }; static const struct snd_usb_audio_quirk midi_quirk = { .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &midi_ep }; static const int intf_numbers[2][3] = { { /* UA-101 */ [INTF_PLAYBACK] = 0, [INTF_CAPTURE] = 1, [INTF_MIDI] = 2, }, { /* UA-1000 */ [INTF_CAPTURE] = 1, [INTF_PLAYBACK] = 2, [INTF_MIDI] = 3, }, }; struct 
snd_card *card; struct ua101 *ua; unsigned int card_index, i; int is_ua1000; const char *name; char usb_path[32]; int err; is_ua1000 = usb_id->idProduct == 0x0044; if (interface->altsetting->desc.bInterfaceNumber != intf_numbers[is_ua1000][0]) return -ENODEV; mutex_lock(&devices_mutex); for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) if (enable[card_index] && !(devices_used & (1 << card_index))) break; if (card_index >= SNDRV_CARDS) { mutex_unlock(&devices_mutex); return -ENOENT; } err = snd_card_new(&interface->dev, index[card_index], id[card_index], THIS_MODULE, sizeof(*ua), &card); if (err < 0) { mutex_unlock(&devices_mutex); return err; } card->private_free = ua101_card_free; ua = card->private_data; ua->dev = interface_to_usbdev(interface); ua->card = card; ua->card_index = card_index; INIT_LIST_HEAD(&ua->midi_list); spin_lock_init(&ua->lock); mutex_init(&ua->mutex); INIT_LIST_HEAD(&ua->ready_playback_urbs); INIT_WORK(&ua->playback_work, playback_work); init_waitqueue_head(&ua->alsa_capture_wait); init_waitqueue_head(&ua->rate_feedback_wait); init_waitqueue_head(&ua->alsa_playback_wait); ua->intf[0] = interface; for (i = 1; i < ARRAY_SIZE(ua->intf); ++i) { ua->intf[i] = usb_ifnum_to_if(ua->dev, intf_numbers[is_ua1000][i]); if (!ua->intf[i]) { dev_err(&ua->dev->dev, "interface %u not found\n", intf_numbers[is_ua1000][i]); err = -ENXIO; goto probe_error; } err = usb_driver_claim_interface(&ua101_driver, ua->intf[i], ua); if (err < 0) { ua->intf[i] = NULL; err = -EBUSY; goto probe_error; } } err = detect_usb_format(ua); if (err < 0) goto probe_error; name = usb_id->idProduct == 0x0044 ? "UA-1000" : "UA-101"; strcpy(card->driver, "UA-101"); strcpy(card->shortname, name); usb_make_path(ua->dev, usb_path, sizeof(usb_path)); snprintf(ua->card->longname, sizeof(ua->card->longname), "EDIROL %s (serial %s), %u Hz at %s, %s speed", name, ua->dev->serial ? ua->dev->serial : "?", ua->rate, usb_path, ua->dev->speed == USB_SPEED_HIGH ? 
"high" : "full"); err = alloc_stream_buffers(ua, &ua->capture); if (err < 0) goto probe_error; err = alloc_stream_buffers(ua, &ua->playback); if (err < 0) goto probe_error; err = alloc_stream_urbs(ua, &ua->capture, capture_urb_complete); if (err < 0) goto probe_error; err = alloc_stream_urbs(ua, &ua->playback, playback_urb_complete); if (err < 0) goto probe_error; err = snd_pcm_new(card, name, 0, 1, 1, &ua->pcm); if (err < 0) goto probe_error; ua->pcm->private_data = ua; strcpy(ua->pcm->name, name); snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_pcm_ops); snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_pcm_ops); snd_pcm_set_managed_buffer_all(ua->pcm, SNDRV_DMA_TYPE_VMALLOC, NULL, 0, 0); err = snd_usbmidi_create(card, ua->intf[INTF_MIDI], &ua->midi_list, &midi_quirk); if (err < 0) goto probe_error; err = snd_card_register(card); if (err < 0) goto probe_error; usb_set_intfdata(interface, ua); devices_used |= 1 << card_index; mutex_unlock(&devices_mutex); return 0; probe_error: free_usb_related_resources(ua, interface); snd_card_free(card); mutex_unlock(&devices_mutex); return err; } static void ua101_disconnect(struct usb_interface *interface) { struct ua101 *ua = usb_get_intfdata(interface); struct list_head *midi; if (!ua) return; mutex_lock(&devices_mutex); set_bit(DISCONNECTED, &ua->states); wake_up(&ua->rate_feedback_wait); /* make sure that userspace cannot create new requests */ snd_card_disconnect(ua->card); /* make sure that there are no pending USB requests */ list_for_each(midi, &ua->midi_list) snd_usbmidi_disconnect(midi); abort_alsa_playback(ua); abort_alsa_capture(ua); mutex_lock(&ua->mutex); stop_usb_playback(ua); stop_usb_capture(ua); mutex_unlock(&ua->mutex); free_usb_related_resources(ua, interface); devices_used &= ~(1 << ua->card_index); snd_card_free_when_closed(ua->card); mutex_unlock(&devices_mutex); } static const struct usb_device_id ua101_ids[] = { { USB_DEVICE(0x0582, 0x0044) }, /* UA-1000 high speed */ { USB_DEVICE(0x0582, 0x007d) }, /* UA-101 high speed */ { USB_DEVICE(0x0582, 0x008d) }, /* UA-101 full speed */ { } }; MODULE_DEVICE_TABLE(usb, ua101_ids); static struct usb_driver ua101_driver = { .name = "snd-ua101", .id_table = ua101_ids, .probe = ua101_probe, .disconnect = ua101_disconnect, #if 0 .suspend = ua101_suspend, .resume = ua101_resume, #endif }; module_usb_driver(ua101_driver);
// SPDX-License-Identifier: GPL-2.0-only /* * TCP CUBIC: Binary Increase Congestion control for TCP v2.3 * Home page: * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC * This is from the implementation of CUBIC TCP in * Sangtae Ha, Injong Rhee and Lisong Xu, * "CUBIC: A New TCP-Friendly High-Speed TCP Variant" * in ACM SIGOPS Operating System Review, July 2008. * Available from: * http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf * * CUBIC integrates a new slow start algorithm, called HyStart. * The details of HyStart are presented in * Sangtae Ha and Injong Rhee, * "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008. * Available from: * http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf * * All testing results are available from: * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing * * Unless CUBIC is enabled and congestion window is large * this behaves the same as the original Reno.
*/ #include <linux/mm.h> #include <linux/btf.h> #include <linux/btf_ids.h> #include <linux/module.h> #include <linux/math64.h> #include <net/tcp.h> #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation * max_cwnd = snd_cwnd * beta */ #define BICTCP_HZ 10 /* BIC HZ 2^10 = 1024 */ /* Two methods of hybrid slow start */ #define HYSTART_ACK_TRAIN 0x1 #define HYSTART_DELAY 0x2 /* Number of delay samples for detecting the increase of delay */ #define HYSTART_MIN_SAMPLES 8 #define HYSTART_DELAY_MIN (4000U) /* 4 ms */ #define HYSTART_DELAY_MAX (16000U) /* 16 ms */ #define HYSTART_DELAY_THRESH(x) clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX) static int fast_convergence __read_mostly = 1; static int beta __read_mostly = 717; /* = 717/1024 (BICTCP_BETA_SCALE) */ static int initial_ssthresh __read_mostly; static int bic_scale __read_mostly = 41; static int tcp_friendliness __read_mostly = 1; static int hystart __read_mostly = 1; static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY; static int hystart_low_window __read_mostly = 16; static int hystart_ack_delta_us __read_mostly = 2000; static u32 cube_rtt_scale __read_mostly; static u32 beta_scale __read_mostly; static u64 cube_factor __read_mostly; /* Note parameters that are used for precomputing scale factors are read-only */ module_param(fast_convergence, int, 0644); MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence"); module_param(beta, int, 0644); MODULE_PARM_DESC(beta, "beta for multiplicative increase"); module_param(initial_ssthresh, int, 0644); MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold"); module_param(bic_scale, int, 0444); MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)"); module_param(tcp_friendliness, int, 0644); MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness"); module_param(hystart, int, 0644); MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm"); module_param(hystart_detect, int, 0644); MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms" " 1: packet-train 2: delay 3: both packet-train and delay"); module_param(hystart_low_window, int, 0644); MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start"); module_param(hystart_ack_delta_us, int, 0644); MODULE_PARM_DESC(hystart_ack_delta_us, "spacing between ack's indicating train (usecs)"); /* BIC TCP Parameters */ struct bictcp { u32 cnt; /* increase cwnd by 1 after ACKs */ u32 last_max_cwnd; /* last maximum snd_cwnd */ u32 last_cwnd; /* the last snd_cwnd */ u32 last_time; /* time when updated last_cwnd */ u32 bic_origin_point;/* origin point of bic function */ u32 bic_K; /* time to origin point from the beginning of the current epoch */ u32 delay_min; /* min delay (usec) */ u32 epoch_start; /* beginning of an epoch */ u32 ack_cnt; /* number of acks */ u32 tcp_cwnd; /* estimated tcp cwnd */ u16 unused; u8 sample_cnt; /* number of samples to decide curr_rtt */ u8 found; /* the exit point is found? 
*/ u32 round_start; /* beginning of each round */ u32 end_seq; /* end_seq of the round */ u32 last_ack; /* last time when the ACK spacing is close */ u32 curr_rtt; /* the minimum rtt of current round */ }; static inline void bictcp_reset(struct bictcp *ca) { memset(ca, 0, offsetof(struct bictcp, unused)); ca->found = 0; } static inline u32 bictcp_clock_us(const struct sock *sk) { return tcp_sk(sk)->tcp_mstamp; } static inline void bictcp_hystart_reset(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); ca->round_start = ca->last_ack = bictcp_clock_us(sk); ca->end_seq = tp->snd_nxt; ca->curr_rtt = ~0U; ca->sample_cnt = 0; } __bpf_kfunc static void cubictcp_init(struct sock *sk) { struct bictcp *ca = inet_csk_ca(sk); bictcp_reset(ca); if (hystart) bictcp_hystart_reset(sk); if (!hystart && initial_ssthresh) tcp_sk(sk)->snd_ssthresh = initial_ssthresh; } __bpf_kfunc static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) { if (event == CA_EVENT_TX_START) { struct bictcp *ca = inet_csk_ca(sk); u32 now = tcp_jiffies32; s32 delta; delta = now - tcp_sk(sk)->lsndtime; /* We were application limited (idle) for a while. * Shift epoch_start to keep cwnd growth to cubic curve. */ if (ca->epoch_start && delta > 0) { ca->epoch_start += delta; if (after(ca->epoch_start, now)) ca->epoch_start = now; } return; } } /* calculate the cubic root of x using a table lookup followed by one * Newton-Raphson iteration. * Avg err ~= 0.195% */ static u32 cubic_root(u64 a) { u32 x, b, shift; /* * cbrt(x) MSB values for x MSB values in [0..63]. * Precomputed then refined by hand - Willy Tarreau * * For x in [0..63], * v = cbrt(x << 18) - 1 * cbrt(x) = (v[x] + 10) >> 6 */ static const u8 v[] = { /* 0x00 */ 0, 54, 54, 54, 118, 118, 118, 118, /* 0x08 */ 123, 129, 134, 138, 143, 147, 151, 156, /* 0x10 */ 157, 161, 164, 168, 170, 173, 176, 179, /* 0x18 */ 181, 185, 187, 190, 192, 194, 197, 199, /* 0x20 */ 200, 202, 204, 206, 209, 211, 213, 215, /* 0x28 */ 217, 219, 221, 222, 224, 225, 227, 229, /* 0x30 */ 231, 232, 234, 236, 237, 239, 240, 242, /* 0x38 */ 244, 245, 246, 248, 250, 251, 252, 254, }; b = fls64(a); if (b < 7) { /* a in [0..63] */ return ((u32)v[(u32)a] + 35) >> 6; } b = ((b * 84) >> 8) - 1; shift = (a >> (b * 3)); x = ((u32)(((u32)v[shift] + 10) << b)) >> 6; /* * Newton-Raphson iteration * 2 * x = ( 2 * x + a / x ) / 3 * k+1 k k */ x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1))); x = ((x * 341) >> 10); return x; } /* * Compute congestion window to use. */ static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked) { u32 delta, bic_target, max_cnt; u64 offs, t; ca->ack_cnt += acked; /* count the number of ACKed packets */ if (ca->last_cwnd == cwnd && (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32) return; /* The CUBIC function can update ca->cnt at most once per jiffy. * On all cwnd reduction events, ca->epoch_start is set to 0, * which will force a recalculation of ca->cnt. 
*/ if (ca->epoch_start && tcp_jiffies32 == ca->last_time) goto tcp_friendliness; ca->last_cwnd = cwnd; ca->last_time = tcp_jiffies32; if (ca->epoch_start == 0) { ca->epoch_start = tcp_jiffies32; /* record beginning */ ca->ack_cnt = acked; /* start counting */ ca->tcp_cwnd = cwnd; /* syn with cubic */ if (ca->last_max_cwnd <= cwnd) { ca->bic_K = 0; ca->bic_origin_point = cwnd; } else { /* Compute new K based on * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ) */ ca->bic_K = cubic_root(cube_factor * (ca->last_max_cwnd - cwnd)); ca->bic_origin_point = ca->last_max_cwnd; } } /* cubic function - calc*/ /* calculate c * time^3 / rtt, * while considering overflow in calculation of time^3 * (so time^3 is done by using 64 bit) * and without the support of division of 64bit numbers * (so all divisions are done by using 32 bit) * also NOTE the unit of those veriables * time = (t - K) / 2^bictcp_HZ * c = bic_scale >> 10 * rtt = (srtt >> 3) / HZ * !!! The following code does not have overflow problems, * if the cwnd < 1 million packets !!! */ t = (s32)(tcp_jiffies32 - ca->epoch_start); t += usecs_to_jiffies(ca->delay_min); /* change the unit from HZ to bictcp_HZ */ t <<= BICTCP_HZ; do_div(t, HZ); if (t < ca->bic_K) /* t - K */ offs = ca->bic_K - t; else offs = t - ca->bic_K; /* c/rtt * (t-K)^3 */ delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); if (t < ca->bic_K) /* below origin*/ bic_target = ca->bic_origin_point - delta; else /* above origin*/ bic_target = ca->bic_origin_point + delta; /* cubic function - calc bictcp_cnt*/ if (bic_target > cwnd) { ca->cnt = cwnd / (bic_target - cwnd); } else { ca->cnt = 100 * cwnd; /* very small increment*/ } /* * The initial growth of cubic function may be too conservative * when the available bandwidth is still unknown. */ if (ca->last_max_cwnd == 0 && ca->cnt > 20) ca->cnt = 20; /* increase cwnd 5% per RTT */ tcp_friendliness: /* TCP Friendly */ if (tcp_friendliness) { u32 scale = beta_scale; delta = (cwnd * scale) >> 3; while (ca->ack_cnt > delta) { /* update tcp cwnd */ ca->ack_cnt -= delta; ca->tcp_cwnd++; } if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */ delta = ca->tcp_cwnd - cwnd; max_cnt = cwnd / delta; if (ca->cnt > max_cnt) ca->cnt = max_cnt; } } /* The maximum rate of cwnd increase CUBIC allows is 1 packet per * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT. */ ca->cnt = max(ca->cnt, 2U); } __bpf_kfunc static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) { struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); if (!tcp_is_cwnd_limited(sk)) return; if (tcp_in_slow_start(tp)) { acked = tcp_slow_start(tp, acked); if (!acked) return; } bictcp_update(ca, tcp_snd_cwnd(tp), acked); tcp_cong_avoid_ai(tp, ca->cnt, acked); } __bpf_kfunc static u32 cubictcp_recalc_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); ca->epoch_start = 0; /* end of epoch */ /* Wmax and fast convergence */ if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence) ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta)) / (2 * BICTCP_BETA_SCALE); else ca->last_max_cwnd = tcp_snd_cwnd(tp); return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U); } __bpf_kfunc static void cubictcp_state(struct sock *sk, u8 new_state) { if (new_state == TCP_CA_Loss) { bictcp_reset(inet_csk_ca(sk)); bictcp_hystart_reset(sk); } } /* Account for TSO/GRO delays. 
* Otherwise short RTT flows could get too small ssthresh, since during * slow start we begin with small TSO packets and ca->delay_min would * not account for long aggregation delay when TSO packets get bigger. * Ideally even with a very small RTT we would like to have at least one * TSO packet being sent and received by GRO, and another one in qdisc layer. * We apply another 100% factor because @rate is doubled at this point. * We cap the cushion to 1ms. */ static u32 hystart_ack_delay(const struct sock *sk) { unsigned long rate; rate = READ_ONCE(sk->sk_pacing_rate); if (!rate) return 0; return min_t(u64, USEC_PER_MSEC, div64_ul((u64)sk->sk_gso_max_size * 4 * USEC_PER_SEC, rate)); } static void hystart_update(struct sock *sk, u32 delay) { struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); u32 threshold; if (after(tp->snd_una, ca->end_seq)) bictcp_hystart_reset(sk); /* hystart triggers when cwnd is larger than some threshold */ if (tcp_snd_cwnd(tp) < hystart_low_window) return; if (hystart_detect & HYSTART_ACK_TRAIN) { u32 now = bictcp_clock_us(sk); /* first detection parameter - ack-train detection */ if ((s32)(now - ca->last_ack) <= hystart_ack_delta_us) { ca->last_ack = now; threshold = ca->delay_min + hystart_ack_delay(sk); /* Hystart ack train triggers if we get ack past * ca->delay_min/2. * Pacing might have delayed packets up to RTT/2 * during slow start. */ if (sk->sk_pacing_status == SK_PACING_NONE) threshold >>= 1; if ((s32)(now - ca->round_start) > threshold) { ca->found = 1; pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n", now - ca->round_start, threshold, ca->delay_min, hystart_ack_delay(sk), tcp_snd_cwnd(tp)); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTTRAINDETECT); NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTTRAINCWND, tcp_snd_cwnd(tp)); tp->snd_ssthresh = tcp_snd_cwnd(tp); } } } if (hystart_detect & HYSTART_DELAY) { /* obtain the minimum delay of more than sampling packets */ if (ca->curr_rtt > delay) ca->curr_rtt = delay; if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { ca->sample_cnt++; } else { if (ca->curr_rtt > ca->delay_min + HYSTART_DELAY_THRESH(ca->delay_min >> 3)) { ca->found = 1; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTDELAYDETECT); NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTDELAYCWND, tcp_snd_cwnd(tp)); tp->snd_ssthresh = tcp_snd_cwnd(tp); } } } } __bpf_kfunc static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) { const struct tcp_sock *tp = tcp_sk(sk); struct bictcp *ca = inet_csk_ca(sk); u32 delay; /* Some calls are for duplicates without timetamps */ if (sample->rtt_us < 0) return; /* Discard delay samples right after fast recovery */ if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ) return; delay = sample->rtt_us; if (delay == 0) delay = 1; /* first time call or link delay decreases */ if (ca->delay_min == 0 || ca->delay_min > delay) ca->delay_min = delay; if (!ca->found && tcp_in_slow_start(tp) && hystart) hystart_update(sk, delay); } static struct tcp_congestion_ops cubictcp __read_mostly = { .init = cubictcp_init, .ssthresh = cubictcp_recalc_ssthresh, .cong_avoid = cubictcp_cong_avoid, .set_state = cubictcp_state, .undo_cwnd = tcp_reno_undo_cwnd, .cwnd_event = cubictcp_cwnd_event, .pkts_acked = cubictcp_acked, .owner = THIS_MODULE, .name = "cubic", }; BTF_KFUNCS_START(tcp_cubic_check_kfunc_ids) BTF_ID_FLAGS(func, cubictcp_init) BTF_ID_FLAGS(func, cubictcp_recalc_ssthresh) BTF_ID_FLAGS(func, cubictcp_cong_avoid) BTF_ID_FLAGS(func, cubictcp_state) 
BTF_ID_FLAGS(func, cubictcp_cwnd_event) BTF_ID_FLAGS(func, cubictcp_acked) BTF_KFUNCS_END(tcp_cubic_check_kfunc_ids) static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = { .owner = THIS_MODULE, .set = &tcp_cubic_check_kfunc_ids, }; static int __init cubictcp_register(void) { int ret; BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE); /* Precompute a bunch of the scaling factors that are used per-packet * based on SRTT of 100ms */ beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3 / (BICTCP_BETA_SCALE - beta); cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */ /* calculate the "K" for (wmax-cwnd) = c/rtt * K^3 * so K = cubic_root( (wmax-cwnd)*rtt/c ) * the unit of K is bictcp_HZ=2^10, not HZ * * c = bic_scale >> 10 * rtt = 100ms * * the following code has been designed and tested for * cwnd < 1 million packets * RTT < 100 seconds * HZ < 100,000,000 (corresponding to a 10-nanosecond tick) */ /* 1/c * 2^2*bictcp_HZ * srtt */ cube_factor = 1ull << (10+3*BICTCP_HZ); /* 2^40 */ /* divide by bic_scale and by constant Srtt (100ms) */ do_div(cube_factor, bic_scale * 10); ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_cubic_kfunc_set); if (ret < 0) return ret; return tcp_register_congestion_control(&cubictcp); } static void __exit cubictcp_unregister(void) { tcp_unregister_congestion_control(&cubictcp); } module_init(cubictcp_register); module_exit(cubictcp_unregister); MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CUBIC TCP"); MODULE_VERSION("2.3");
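/*
 * Illustrative sketch (not kernel code): the cubic growth curve that
 * bictcp_update() evaluates in fixed point, written in floating point
 * for clarity.  After a loss the window is cut to wmax * (1 - beta_loss)
 * and then regrows along W(t) = C * (t - K)^3 + wmax, where
 * K = cbrt(wmax * beta_loss / C) is the time at which the curve returns
 * to wmax.  C = 0.4 follows the CUBIC paper and beta_loss is derived
 * from this module's default beta of 717/1024; the wmax value and the
 * printed table are purely illustrative.  Build with -lm.
 */
#include <math.h>
#include <stdio.h>

/* Window size (in packets) t seconds after a loss event. */
static double cubic_window(double t, double wmax)
{
	const double c = 0.4;				/* CUBIC 'C' constant */
	const double beta_loss = 1.0 - 717.0 / 1024.0;	/* ~0.3 reduction */
	double k = cbrt(wmax * beta_loss / c);		/* time to regrow to wmax */

	return c * pow(t - k, 3.0) + wmax;
}

int main(void)
{
	double wmax = 100.0;	/* hypothetical pre-loss cwnd, in packets */
	double t;

	for (t = 0.0; t <= 4.0; t += 0.5)
		printf("t=%.1fs  W(t)=%.1f packets\n", t, cubic_window(t, wmax));
	return 0;
}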
/* * Copyright (C) 2014 Red Hat * Copyright (C) 2014 Intel Corp. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rob Clark <robdclark@gmail.com> * Daniel Vetter <daniel.vetter@ffwll.ch> */ #ifndef DRM_ATOMIC_HELPER_H_ #define DRM_ATOMIC_HELPER_H_ #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_util.h> /* * Drivers that don't allow primary plane scaling may pass this macro in place * of the min/max scale parameters of the plane-state checker function. * * Due to src being in 16.16 fixed point and dest being in integer pixels, * 1<<16 represents no scaling.
*/ #define DRM_PLANE_NO_SCALING (1<<16) struct drm_atomic_state; struct drm_private_obj; struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector, struct drm_atomic_state *state); int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, const struct drm_crtc_state *crtc_state, int min_scale, int max_scale, bool can_position, bool can_update_disabled); int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state); int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *state); int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); int drm_atomic_helper_async_check(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_async_commit(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, bool pre_swap); void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, struct drm_atomic_state *old_state); void drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, struct drm_atomic_state *old_state); void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state); void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, struct drm_atomic_state *old_state); int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_unprepare_planes(struct drm_device *dev, struct drm_atomic_state *state); #define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0) #define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1) void drm_atomic_helper_commit_planes(struct drm_device *dev, struct drm_atomic_state *state, uint32_t flags); void drm_atomic_helper_cleanup_planes(struct drm_device *dev, struct drm_atomic_state *old_state); void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, bool atomic); int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state, bool stall); /* nonblocking commit helpers */ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, bool nonblock); void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state); void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state); void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state); void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state); /* implementations for legacy interfaces */ int drm_atomic_helper_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, struct drm_modeset_acquire_ctx *ctx); int drm_atomic_helper_disable_plane(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx); int 
drm_atomic_helper_set_config(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx); int drm_atomic_helper_disable_all(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); void drm_atomic_helper_shutdown(struct drm_device *dev); struct drm_atomic_state * drm_atomic_helper_duplicate_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx); int drm_atomic_helper_resume(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags, struct drm_modeset_acquire_ctx *ctx); int drm_atomic_helper_page_flip_target( struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags, uint32_t target, struct drm_modeset_acquire_ctx *ctx); /** * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC * @plane: the loop cursor * @crtc: the CRTC whose planes are iterated * * This iterates over the current state, useful (for example) when applying * atomic state after it has been checked and swapped. To iterate over the * planes which *will* be attached (more useful in code called from * &drm_mode_config_funcs.atomic_check) see * drm_atomic_crtc_state_for_each_plane(). */ #define drm_atomic_crtc_for_each_plane(plane, crtc) \ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask) /** * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state * @plane: the loop cursor * @crtc_state: the incoming CRTC state * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be * attached if the specified state is applied. Useful during for example * in code called from &drm_mode_config_funcs.atomic_check operations, to * validate the incoming state. */ #define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) /** * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state * @plane: the loop cursor * @plane_state: loop cursor for the plane's state, must be const * @crtc_state: the incoming CRTC state * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be * attached if the specified state is applied. Useful during for example * in code called from &drm_mode_config_funcs.atomic_check operations, to * validate the incoming state. * * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a * const plane_state. This is useful when a driver just wants to peek at other * active planes on this CRTC, but does not need to change it. */ #define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \ for_each_if ((plane_state = \ __drm_atomic_get_current_plane_state((crtc_state)->state, \ plane))) /** * drm_atomic_plane_enabling - check whether a plane is being enabled * @old_plane_state: old atomic plane state * @new_plane_state: new atomic plane state * * Checks the atomic state of a plane to determine whether it's being enabled * or not. This also WARNs if it detects an invalid state (both CRTC and FB * need to either both be NULL or both be non-NULL). 
* * RETURNS: * True if the plane is being enabled, false otherwise. */ static inline bool drm_atomic_plane_enabling(struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state) { /* * When enabling a plane, CRTC and FB should always be set together. * Anything else should be considered a bug in the atomic core, so we * gently warn about it. */ WARN_ON((!new_plane_state->crtc && new_plane_state->fb) || (new_plane_state->crtc && !new_plane_state->fb)); return !old_plane_state->crtc && new_plane_state->crtc; } /** * drm_atomic_plane_disabling - check whether a plane is being disabled * @old_plane_state: old atomic plane state * @new_plane_state: new atomic plane state * * Checks the atomic state of a plane to determine whether it's being disabled * or not. This also WARNs if it detects an invalid state (both CRTC and FB * need to either both be NULL or both be non-NULL). * * RETURNS: * True if the plane is being disabled, false otherwise. */ static inline bool drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state, struct drm_plane_state *new_plane_state) { /* * When disabling a plane, CRTC and FB should always be NULL together. * Anything else should be considered a bug in the atomic core, so we * gently warn about it. */ WARN_ON((new_plane_state->crtc == NULL && new_plane_state->fb != NULL) || (new_plane_state->crtc != NULL && new_plane_state->fb == NULL)); return old_plane_state->crtc && !new_plane_state->crtc; } u32 * drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts); #endif /* DRM_ATOMIC_HELPER_H_ */
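/*
 * Minimal sketch (not from any in-tree driver): how a driver's plane
 * &drm_plane_helper_funcs.atomic_check hook might use the checker
 * declared above.  The foo_* name is hypothetical; scaling is rejected
 * by passing DRM_PLANE_NO_SCALING for both the minimum and maximum
 * scale, while positioning and updates on a disabled CRTC are allowed.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	/* A plane without a CRTC is being disabled; nothing to validate. */
	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   true, true);
}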
// SPDX-License-Identifier: GPL-2.0-only /* * fs/dax.c - Direct Access filesystem code * Copyright (c) 2013-2014 Intel Corporation * Author: Matthew Wilcox <matthew.r.wilcox@intel.com> * Author: Ross Zwisler <ross.zwisler@linux.intel.com> */ #include <linux/atomic.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/dax.h> #include <linux/fs.h> #include <linux/highmem.h> #include <linux/memcontrol.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/pagevec.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/uio.h> #include <linux/vmstat.h> #include <linux/pfn_t.h> #include <linux/sizes.h> #include <linux/mmu_notifier.h> #include <linux/iomap.h> #include <linux/rmap.h> #include <asm/pgalloc.h> #define CREATE_TRACE_POINTS #include <trace/events/fs_dax.h> /* We choose 4096 entries - same as per-zone page wait tables */ #define DAX_WAIT_TABLE_BITS 12 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) /* The 'colour' (ie low bits) within a PMD of a page offset.
*/ #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT) static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; static int __init init_dax_wait_table(void) { int i; for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++) init_waitqueue_head(wait_table + i); return 0; } fs_initcall(init_dax_wait_table); /* * DAX pagecache entries use XArray value entries so they can't be mistaken * for pages. We use one bit for locking, one bit for the entry size (PMD) * and two more to tell us if the entry is a zero page or an empty entry that * is just used for locking. In total four special bits. * * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem * block allocation. */ #define DAX_SHIFT (4) #define DAX_LOCKED (1UL << 0) #define DAX_PMD (1UL << 1) #define DAX_ZERO_PAGE (1UL << 2) #define DAX_EMPTY (1UL << 3) static unsigned long dax_to_pfn(void *entry) { return xa_to_value(entry) >> DAX_SHIFT; } static void *dax_make_entry(pfn_t pfn, unsigned long flags) { return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); } static bool dax_is_locked(void *entry) { return xa_to_value(entry) & DAX_LOCKED; } static unsigned int dax_entry_order(void *entry) { if (xa_to_value(entry) & DAX_PMD) return PMD_ORDER; return 0; } static unsigned long dax_is_pmd_entry(void *entry) { return xa_to_value(entry) & DAX_PMD; } static bool dax_is_pte_entry(void *entry) { return !(xa_to_value(entry) & DAX_PMD); } static int dax_is_zero_entry(void *entry) { return xa_to_value(entry) & DAX_ZERO_PAGE; } static int dax_is_empty_entry(void *entry) { return xa_to_value(entry) & DAX_EMPTY; } /* * true if the entry that was found is of a smaller order than the entry * we were looking for */ static bool dax_is_conflict(void *entry) { return entry == XA_RETRY_ENTRY; } /* * DAX page cache entry locking */ struct exceptional_entry_key { struct xarray *xa; pgoff_t entry_start; }; struct wait_exceptional_entry_queue { wait_queue_entry_t wait; struct exceptional_entry_key key; }; /** * enum dax_wake_mode: waitqueue wakeup behaviour * @WAKE_ALL: wake all waiters in the waitqueue * @WAKE_NEXT: wake only the first waiter in the waitqueue */ enum dax_wake_mode { WAKE_ALL, WAKE_NEXT, }; static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, void *entry, struct exceptional_entry_key *key) { unsigned long hash; unsigned long index = xas->xa_index; /* * If 'entry' is a PMD, align the 'index' that we use for the wait * queue to the start of that PMD. This ensures that all offsets in * the range covered by the PMD map to the same bit lock. */ if (dax_is_pmd_entry(entry)) index &= ~PG_PMD_COLOUR; key->xa = xas->xa; key->entry_start = index; hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); return wait_table + hash; } static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode, int sync, void *keyp) { struct exceptional_entry_key *key = keyp; struct wait_exceptional_entry_queue *ewait = container_of(wait, struct wait_exceptional_entry_queue, wait); if (key->xa != ewait->key.xa || key->entry_start != ewait->key.entry_start) return 0; return autoremove_wake_function(wait, mode, sync, NULL); } /* * @entry may no longer be the entry at the index in the mapping. * The important information it's conveying is whether the entry at * this index used to be a PMD entry. 
*/ static void dax_wake_entry(struct xa_state *xas, void *entry, enum dax_wake_mode mode) { struct exceptional_entry_key key; wait_queue_head_t *wq; wq = dax_entry_waitqueue(xas, entry, &key); /* * Checking for locked entry and prepare_to_wait_exclusive() happens * under the i_pages lock, ditto for entry handling in our callers. * So at this point all tasks that could have seen our entry locked * must be in the waitqueue and the following check will see them. */ if (waitqueue_active(wq)) __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key); } /* * Look up entry in page cache, wait for it to become unlocked if it * is a DAX entry and return it. The caller must subsequently call * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry() * if it did. The entry returned may have a larger order than @order. * If @order is larger than the order of the entry found in i_pages, this * function returns a dax_is_conflict entry. * * Must be called with the i_pages lock held. */ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) { void *entry; struct wait_exceptional_entry_queue ewait; wait_queue_head_t *wq; init_wait(&ewait.wait); ewait.wait.func = wake_exceptional_entry_func; for (;;) { entry = xas_find_conflict(xas); if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) return entry; if (dax_entry_order(entry) < order) return XA_RETRY_ENTRY; if (!dax_is_locked(entry)) return entry; wq = dax_entry_waitqueue(xas, entry, &ewait.key); prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); xas_unlock_irq(xas); xas_reset(xas); schedule(); finish_wait(wq, &ewait.wait); xas_lock_irq(xas); } } /* * The only thing keeping the address space around is the i_pages lock * (it's cycled in clear_inode() after removing the entries from i_pages) * After we call xas_unlock_irq(), we cannot touch xas->xa. */ static void wait_entry_unlocked(struct xa_state *xas, void *entry) { struct wait_exceptional_entry_queue ewait; wait_queue_head_t *wq; init_wait(&ewait.wait); ewait.wait.func = wake_exceptional_entry_func; wq = dax_entry_waitqueue(xas, entry, &ewait.key); /* * Unlike get_unlocked_entry() there is no guarantee that this * path ever successfully retrieves an unlocked entry before an * inode dies. Perform a non-exclusive wait in case this path * never successfully performs its own wake up. */ prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); xas_unlock_irq(xas); schedule(); finish_wait(wq, &ewait.wait); } static void put_unlocked_entry(struct xa_state *xas, void *entry, enum dax_wake_mode mode) { if (entry && !dax_is_conflict(entry)) dax_wake_entry(xas, entry, mode); } /* * We used the xa_state to get the entry, but then we locked the entry and * dropped the xa_lock, so we know the xa_state is stale and must be reset * before use. */ static void dax_unlock_entry(struct xa_state *xas, void *entry) { void *old; BUG_ON(dax_is_locked(entry)); xas_reset(xas); xas_lock_irq(xas); old = xas_store(xas, entry); xas_unlock_irq(xas); BUG_ON(!dax_is_locked(old)); dax_wake_entry(xas, entry, WAKE_NEXT); } /* * Return: The entry stored at this location before it was locked. 
*/ static void *dax_lock_entry(struct xa_state *xas, void *entry) { unsigned long v = xa_to_value(entry); return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); } static unsigned long dax_entry_size(void *entry) { if (dax_is_zero_entry(entry)) return 0; else if (dax_is_empty_entry(entry)) return 0; else if (dax_is_pmd_entry(entry)) return PMD_SIZE; else return PAGE_SIZE; } static unsigned long dax_end_pfn(void *entry) { return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; } /* * Iterate through all mapped pfns represented by an entry, i.e. skip * 'empty' and 'zero' entries. */ #define for_each_mapped_pfn(entry, pfn) \ for (pfn = dax_to_pfn(entry); \ pfn < dax_end_pfn(entry); pfn++) static inline bool dax_page_is_shared(struct page *page) { return page->mapping == PAGE_MAPPING_DAX_SHARED; } /* * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the * refcount. */ static inline void dax_page_share_get(struct page *page) { if (page->mapping != PAGE_MAPPING_DAX_SHARED) { /* * Reset the index if the page was already mapped * regularly before. */ if (page->mapping) page->share = 1; page->mapping = PAGE_MAPPING_DAX_SHARED; } page->share++; } static inline unsigned long dax_page_share_put(struct page *page) { return --page->share; } /* * When it is called in dax_insert_entry(), the shared flag will indicate that * whether this entry is shared by multiple files. If so, set the page->mapping * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount. */ static void dax_associate_entry(void *entry, struct address_space *mapping, struct vm_area_struct *vma, unsigned long address, bool shared) { unsigned long size = dax_entry_size(entry), pfn, index; int i = 0; if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; index = linear_page_index(vma, address & ~(size - 1)); for_each_mapped_pfn(entry, pfn) { struct page *page = pfn_to_page(pfn); if (shared) { dax_page_share_get(page); } else { WARN_ON_ONCE(page->mapping); page->mapping = mapping; page->index = index + i++; } } } static void dax_disassociate_entry(void *entry, struct address_space *mapping, bool trunc) { unsigned long pfn; if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; for_each_mapped_pfn(entry, pfn) { struct page *page = pfn_to_page(pfn); WARN_ON_ONCE(trunc && page_ref_count(page) > 1); if (dax_page_is_shared(page)) { /* keep the shared flag if this page is still shared */ if (dax_page_share_put(page) > 0) continue; } else WARN_ON_ONCE(page->mapping && page->mapping != mapping); page->mapping = NULL; page->index = 0; } } static struct page *dax_busy_page(void *entry) { unsigned long pfn; for_each_mapped_pfn(entry, pfn) { struct page *page = pfn_to_page(pfn); if (page_ref_count(page) > 1) return page; } return NULL; } /** * dax_lock_folio - Lock the DAX entry corresponding to a folio * @folio: The folio whose entry we want to lock * * Context: Process context. * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could * not be locked. */ dax_entry_t dax_lock_folio(struct folio *folio) { XA_STATE(xas, NULL, 0); void *entry; /* Ensure folio->mapping isn't freed while we look at it */ rcu_read_lock(); for (;;) { struct address_space *mapping = READ_ONCE(folio->mapping); entry = NULL; if (!mapping || !dax_mapping(mapping)) break; /* * In the device-dax case there's no need to lock, a * struct dev_pagemap pin is sufficient to keep the * inode alive, and we assume we have dev_pagemap pin * otherwise we would not have a valid pfn_to_page() * translation. 
*/ entry = (void *)~0UL; if (S_ISCHR(mapping->host->i_mode)) break; xas.xa = &mapping->i_pages; xas_lock_irq(&xas); if (mapping != folio->mapping) { xas_unlock_irq(&xas); continue; } xas_set(&xas, folio->index); entry = xas_load(&xas); if (dax_is_locked(entry)) { rcu_read_unlock(); wait_entry_unlocked(&xas, entry); rcu_read_lock(); continue; } dax_lock_entry(&xas, entry); xas_unlock_irq(&xas); break; } rcu_read_unlock(); return (dax_entry_t)entry; } void dax_unlock_folio(struct folio *folio, dax_entry_t cookie) { struct address_space *mapping = folio->mapping; XA_STATE(xas, &mapping->i_pages, folio->index); if (S_ISCHR(mapping->host->i_mode)) return; dax_unlock_entry(&xas, (void *)cookie); } /* * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping * @mapping: the file's mapping whose entry we want to lock * @index: the offset within this file * @page: output the dax page corresponding to this dax entry * * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry * could not be locked. */ dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index, struct page **page) { XA_STATE(xas, NULL, 0); void *entry; rcu_read_lock(); for (;;) { entry = NULL; if (!dax_mapping(mapping)) break; xas.xa = &mapping->i_pages; xas_lock_irq(&xas); xas_set(&xas, index); entry = xas_load(&xas); if (dax_is_locked(entry)) { rcu_read_unlock(); wait_entry_unlocked(&xas, entry); rcu_read_lock(); continue; } if (!entry || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { /* * Because we are looking for entry from file's mapping * and index, so the entry may not be inserted for now, * or even a zero/empty entry. We don't think this is * an error case. So, return a special value and do * not output @page. */ entry = (void *)~0UL; } else { *page = pfn_to_page(dax_to_pfn(entry)); dax_lock_entry(&xas, entry); } xas_unlock_irq(&xas); break; } rcu_read_unlock(); return (dax_entry_t)entry; } void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index, dax_entry_t cookie) { XA_STATE(xas, &mapping->i_pages, index); if (cookie == ~0UL) return; dax_unlock_entry(&xas, (void *)cookie); } /* * Find page cache entry at given index. If it is a DAX entry, return it * with the entry locked. If the page cache doesn't contain an entry at * that index, add a locked empty entry. * * When requesting an entry with size DAX_PMD, grab_mapping_entry() will * either return that locked entry or will return VM_FAULT_FALLBACK. * This will happen if there are any PTE entries within the PMD range * that we are requesting. * * We always favor PTE entries over PMD entries. There isn't a flow where we * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD * insertion will fail if it finds any PTE entries already in the tree, and a * PTE insertion will cause an existing PMD entry to be unmapped and * downgraded to PTE entries. This happens for both PMD zero pages as * well as PMD empty entries. * * The exception to this downgrade path is for PMD entries that have * real storage backing them. We will leave these real PMD entries in * the tree, and PTE writes will simply dirty the entire PMD entry. * * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For * persistent memory the benefit is doubtful. We can add that later if we can * show it helps. * * On error, this function does not return an ERR_PTR. Instead it returns * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values * overlap with xarray value entries. 
*/ static void *grab_mapping_entry(struct xa_state *xas, struct address_space *mapping, unsigned int order) { unsigned long index = xas->xa_index; bool pmd_downgrade; /* splitting PMD entry into PTE entries? */ void *entry; retry: pmd_downgrade = false; xas_lock_irq(xas); entry = get_unlocked_entry(xas, order); if (entry) { if (dax_is_conflict(entry)) goto fallback; if (!xa_is_value(entry)) { xas_set_err(xas, -EIO); goto out_unlock; } if (order == 0) { if (dax_is_pmd_entry(entry) && (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))) { pmd_downgrade = true; } } } if (pmd_downgrade) { /* * Make sure 'entry' remains valid while we drop * the i_pages lock. */ dax_lock_entry(xas, entry); /* * Besides huge zero pages the only other thing that gets * downgraded are empty entries which don't need to be * unmapped. */ if (dax_is_zero_entry(entry)) { xas_unlock_irq(xas); unmap_mapping_pages(mapping, xas->xa_index & ~PG_PMD_COLOUR, PG_PMD_NR, false); xas_reset(xas); xas_lock_irq(xas); } dax_disassociate_entry(entry, mapping, false); xas_store(xas, NULL); /* undo the PMD join */ dax_wake_entry(xas, entry, WAKE_ALL); mapping->nrpages -= PG_PMD_NR; entry = NULL; xas_set(xas, index); } if (entry) { dax_lock_entry(xas, entry); } else { unsigned long flags = DAX_EMPTY; if (order > 0) flags |= DAX_PMD; entry = dax_make_entry(pfn_to_pfn_t(0), flags); dax_lock_entry(xas, entry); if (xas_error(xas)) goto out_unlock; mapping->nrpages += 1UL << order; } out_unlock: xas_unlock_irq(xas); if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) goto retry; if (xas->xa_node == XA_ERROR(-ENOMEM)) return xa_mk_internal(VM_FAULT_OOM); if (xas_error(xas)) return xa_mk_internal(VM_FAULT_SIGBUS); return entry; fallback: xas_unlock_irq(xas); return xa_mk_internal(VM_FAULT_FALLBACK); } /** * dax_layout_busy_page_range - find first pinned page in @mapping * @mapping: address space to scan for a page with ref count > 1 * @start: Starting offset. Page containing 'start' is included. * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX, * pages from 'start' till the end of file are included. * * DAX requires ZONE_DEVICE mapped pages. These pages are never * 'onlined' to the page allocator so they are considered idle when * page->count == 1. A filesystem uses this interface to determine if * any page in the mapping is busy, i.e. for DMA, or other * get_user_pages() usages. * * It is expected that the filesystem is holding locks to block the * establishment of new mappings in this address_space. I.e. it expects * to be able to run unmap_mapping_range() and subsequently not race * mapping_mapped() becoming true. */ struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end) { void *entry; unsigned int scanned = 0; struct page *page = NULL; pgoff_t start_idx = start >> PAGE_SHIFT; pgoff_t end_idx; XA_STATE(xas, &mapping->i_pages, start_idx); /* * In the 'limited' case get_user_pages() for dax is disabled. */ if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return NULL; if (!dax_mapping(mapping) || !mapping_mapped(mapping)) return NULL; /* If end == LLONG_MAX, all pages from start to till end of file */ if (end == LLONG_MAX) end_idx = ULONG_MAX; else end_idx = end >> PAGE_SHIFT; /* * If we race get_user_pages_fast() here either we'll see the * elevated page count in the iteration and wait, or * get_user_pages_fast() will see that the page it took a reference * against is no longer mapped in the page tables and bail to the * get_user_pages() slow path. 
The slow path is protected by * pte_lock() and pmd_lock(). New references are not taken without * holding those locks, and unmap_mapping_pages() will not zero the * pte or pmd without holding the respective lock, so we are * guaranteed to either see new references or prevent new * references from being established. */ unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0); xas_lock_irq(&xas); xas_for_each(&xas, entry, end_idx) { if (WARN_ON_ONCE(!xa_is_value(entry))) continue; if (unlikely(dax_is_locked(entry))) entry = get_unlocked_entry(&xas, 0); if (entry) page = dax_busy_page(entry); put_unlocked_entry(&xas, entry, WAKE_NEXT); if (page) break; if (++scanned % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); return page; } EXPORT_SYMBOL_GPL(dax_layout_busy_page_range); struct page *dax_layout_busy_page(struct address_space *mapping) { return dax_layout_busy_page_range(mapping, 0, LLONG_MAX); } EXPORT_SYMBOL_GPL(dax_layout_busy_page); static int __dax_invalidate_entry(struct address_space *mapping, pgoff_t index, bool trunc) { XA_STATE(xas, &mapping->i_pages, index); int ret = 0; void *entry; xas_lock_irq(&xas); entry = get_unlocked_entry(&xas, 0); if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) goto out; if (!trunc && (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) goto out; dax_disassociate_entry(entry, mapping, trunc); xas_store(&xas, NULL); mapping->nrpages -= 1UL << dax_entry_order(entry); ret = 1; out: put_unlocked_entry(&xas, entry, WAKE_ALL); xas_unlock_irq(&xas); return ret; } static int __dax_clear_dirty_range(struct address_space *mapping, pgoff_t start, pgoff_t end) { XA_STATE(xas, &mapping->i_pages, start); unsigned int scanned = 0; void *entry; xas_lock_irq(&xas); xas_for_each(&xas, entry, end) { entry = get_unlocked_entry(&xas, 0); xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); put_unlocked_entry(&xas, entry, WAKE_NEXT); if (++scanned % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); return 0; } /* * Delete DAX entry at @index from @mapping. Wait for it * to be unlocked before deleting it. */ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) { int ret = __dax_invalidate_entry(mapping, index, true); /* * This gets called from truncate / punch_hole path. As such, the caller * must hold locks protecting against concurrent modifications of the * page cache (usually fs-private i_mmap_sem for writing). Since the * caller has seen a DAX entry for this index, we better find it * at that index as well... */ WARN_ON_ONCE(!ret); return ret; } /* * Invalidate DAX entry if it is clean. 
*/ int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index) { return __dax_invalidate_entry(mapping, index, false); } static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos) { return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset); } static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter) { pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos); void *vto, *kaddr; long rc; int id; id = dax_read_lock(); rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL); if (rc < 0) { dax_read_unlock(id); return rc; } vto = kmap_atomic(vmf->cow_page); copy_user_page(vto, kaddr, vmf->address, vmf->cow_page); kunmap_atomic(vto); dax_read_unlock(id); return 0; } /* * MAP_SYNC on a dax mapping guarantees dirty metadata is * flushed on write-faults (non-cow), but not read-faults. */ static bool dax_fault_is_synchronous(const struct iomap_iter *iter, struct vm_area_struct *vma) { return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) && (iter->iomap.flags & IOMAP_F_DIRTY); } /* * By this point grab_mapping_entry() has ensured that we have a locked entry * of the appropriate size so we don't have to worry about downgrading PMDs to * PTEs. If we happen to be trying to insert a PTE and there is a PMD * already in the tree, we will skip the insertion and just dirty the PMD as * appropriate. */ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void *entry, pfn_t pfn, unsigned long flags) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; void *new_entry = dax_make_entry(pfn, flags); bool write = iter->flags & IOMAP_WRITE; bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma); bool shared = iter->iomap.flags & IOMAP_F_SHARED; if (dirty) __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) { unsigned long index = xas->xa_index; /* we are replacing a zero page with block mapping */ if (dax_is_pmd_entry(entry)) unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, PG_PMD_NR, false); else /* pte entry */ unmap_mapping_pages(mapping, index, 1, false); } xas_reset(xas); xas_lock_irq(xas); if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { void *old; dax_disassociate_entry(entry, mapping, false); dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, shared); /* * Only swap our new entry into the page cache if the current * entry is a zero page or an empty entry. If a normal PTE or * PMD entry is already in the cache, we leave it alone. This * means that if we are trying to insert a PTE and the * existing entry is a PMD, we will just leave the PMD in the * tree and dirty it if necessary. */ old = dax_lock_entry(xas, new_entry); WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | DAX_LOCKED)); entry = new_entry; } else { xas_load(xas); /* Walk the xa_state */ } if (dirty) xas_set_mark(xas, PAGECACHE_TAG_DIRTY); if (write && shared) xas_set_mark(xas, PAGECACHE_TAG_TOWRITE); xas_unlock_irq(xas); return entry; } static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, struct address_space *mapping, void *entry) { unsigned long pfn, index, count, end; long ret = 0; struct vm_area_struct *vma; /* * A page got tagged dirty in DAX mapping? Something is seriously * wrong. 
*/ if (WARN_ON(!xa_is_value(entry))) return -EIO; if (unlikely(dax_is_locked(entry))) { void *old_entry = entry; entry = get_unlocked_entry(xas, 0); /* Entry got punched out / reallocated? */ if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) goto put_unlocked; /* * Entry got reallocated elsewhere? No need to writeback. * We have to compare pfns as we must not bail out due to * difference in lockbit or entry type. */ if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) goto put_unlocked; if (WARN_ON_ONCE(dax_is_empty_entry(entry) || dax_is_zero_entry(entry))) { ret = -EIO; goto put_unlocked; } /* Another fsync thread may have already done this entry */ if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) goto put_unlocked; } /* Lock the entry to serialize with page faults */ dax_lock_entry(xas, entry); /* * We can clear the tag now but we have to be careful so that concurrent * dax_writeback_one() calls for the same index cannot finish before we * actually flush the caches. This is achieved as the calls will look * at the entry only under the i_pages lock and once they do that * they will see the entry locked and wait for it to unlock. */ xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); xas_unlock_irq(xas); /* * If dax_writeback_mapping_range() was given a wbc->range_start * in the middle of a PMD, the 'index' we use needs to be * aligned to the start of the PMD. * This allows us to flush for PMD_SIZE and not have to worry about * partial PMD writebacks. */ pfn = dax_to_pfn(entry); count = 1UL << dax_entry_order(entry); index = xas->xa_index & ~(count - 1); end = index + count - 1; /* Walk all mappings of a given index of a file and writeprotect them */ i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) { pfn_mkclean_range(pfn, count, index, vma); cond_resched(); } i_mmap_unlock_read(mapping); dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE); /* * After we have flushed the cache, we can clear the dirty tag. There * cannot be new dirty data in the pfn after the flush has completed as * the pfn mappings are writeprotected and fault waits for mapping * entry lock. */ xas_reset(xas); xas_lock_irq(xas); xas_store(xas, entry); xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); dax_wake_entry(xas, entry, WAKE_NEXT); trace_dax_writeback_one(mapping->host, index, count); return ret; put_unlocked: put_unlocked_entry(xas, entry, WAKE_NEXT); return ret; } /* * Flush the mapping to the persistent domain within the byte range of [start, * end]. This is required by data integrity operations to ensure file data is * on persistent storage prior to completion of the operation. 
*/ int dax_writeback_mapping_range(struct address_space *mapping, struct dax_device *dax_dev, struct writeback_control *wbc) { XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); struct inode *inode = mapping->host; pgoff_t end_index = wbc->range_end >> PAGE_SHIFT; void *entry; int ret = 0; unsigned int scanned = 0; if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) return -EIO; if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) return 0; trace_dax_writeback_range(inode, xas.xa_index, end_index); tag_pages_for_writeback(mapping, xas.xa_index, end_index); xas_lock_irq(&xas); xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { ret = dax_writeback_one(&xas, dax_dev, mapping, entry); if (ret < 0) { mapping_set_error(mapping, ret); break; } if (++scanned % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); trace_dax_writeback_range_done(inode, xas.xa_index, end_index); return ret; } EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, size_t size, void **kaddr, pfn_t *pfnp) { pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); int id, rc = 0; long length; id = dax_read_lock(); length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), DAX_ACCESS, kaddr, pfnp); if (length < 0) { rc = length; goto out; } if (!pfnp) goto out_check_addr; rc = -EINVAL; if (PFN_PHYS(length) < size) goto out; if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) goto out; /* For larger pages we need devmap */ if (length > 1 && !pfn_t_devmap(*pfnp)) goto out; rc = 0; out_check_addr: if (!kaddr) goto out; if (!*kaddr) rc = -EFAULT; out: dax_read_unlock(id); return rc; } /** * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page * by copying the data before and after the range to be written. * @pos: address to do copy from. * @length: size of copy operation. * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE) * @srcmap: iomap srcmap * @daddr: destination address to copy to. * * This can be called from two places. Either during DAX write fault (page * aligned), to copy the length size data to daddr. Or, while doing normal DAX * write operation, dax_iomap_iter() might call this to do the copy of either * start or end unaligned address. In the latter case the rest of the copy of * aligned ranges is taken care by dax_iomap_iter() itself. * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the * area to make sure no old data remains. 
*/ static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size, const struct iomap *srcmap, void *daddr) { loff_t head_off = pos & (align_size - 1); size_t size = ALIGN(head_off + length, align_size); loff_t end = pos + length; loff_t pg_end = round_up(end, align_size); /* copy_all is usually in page fault case */ bool copy_all = head_off == 0 && end == pg_end; /* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */ bool zero_edge = srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN; void *saddr = NULL; int ret = 0; if (!zero_edge) { ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL); if (ret) return dax_mem2blk_err(ret); } if (copy_all) { if (zero_edge) memset(daddr, 0, size); else ret = copy_mc_to_kernel(daddr, saddr, length); goto out; } /* Copy the head part of the range */ if (head_off) { if (zero_edge) memset(daddr, 0, head_off); else { ret = copy_mc_to_kernel(daddr, saddr, head_off); if (ret) return -EIO; } } /* Copy the tail part of the range */ if (end < pg_end) { loff_t tail_off = head_off + length; loff_t tail_len = pg_end - end; if (zero_edge) memset(daddr + tail_off, 0, tail_len); else { ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off, tail_len); if (ret) return -EIO; } } out: if (zero_edge) dax_flush(srcmap->dax_dev, daddr, size); return ret ? -EIO : 0; } /* * The user has performed a load from a hole in the file. Allocating a new * page in the file would cause excessive storage usage for workloads with * sparse files. Instead we insert a read-only mapping of the 4k zero page. * If this page is ever written to we will re-fault and change the mapping to * point to real DAX storage instead. */ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry) { struct inode *inode = iter->inode; unsigned long vaddr = vmf->address; pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); vm_fault_t ret; *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); trace_dax_load_hole(inode, vmf, ret); return ret; } #ifdef CONFIG_FS_DAX_PMD static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; unsigned long pmd_addr = vmf->address & PMD_MASK; struct vm_area_struct *vma = vmf->vma; struct inode *inode = mapping->host; pgtable_t pgtable = NULL; struct folio *zero_folio; spinlock_t *ptl; pmd_t pmd_entry; pfn_t pfn; zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm); if (unlikely(!zero_folio)) goto fallback; pfn = page_to_pfn_t(&zero_folio->page); *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_PMD | DAX_ZERO_PAGE); if (arch_needs_pgtable_deposit()) { pgtable = pte_alloc_one(vma->vm_mm); if (!pgtable) return VM_FAULT_OOM; } ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); if (!pmd_none(*(vmf->pmd))) { spin_unlock(ptl); goto fallback; } if (pgtable) { pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); mm_inc_nr_ptes(vma->vm_mm); } pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot); pmd_entry = pmd_mkhuge(pmd_entry); set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); spin_unlock(ptl); trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry); return VM_FAULT_NOPAGE; fallback: if (pgtable) pte_free(vma->vm_mm, pgtable); trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry); return VM_FAULT_FALLBACK; } #else static vm_fault_t dax_pmd_load_hole(struct 
xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry) { return VM_FAULT_FALLBACK; } #endif /* CONFIG_FS_DAX_PMD */ static s64 dax_unshare_iter(struct iomap_iter *iter) { struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t copy_pos = iter->pos; u64 copy_len = iomap_length(iter); u32 mod; int id = 0; s64 ret = 0; void *daddr = NULL, *saddr = NULL; if (!iomap_want_unshare_iter(iter)) return iomap_length(iter); /* * Extend the file range to be aligned to fsblock/pagesize, because * we need to copy entire blocks, not just the byte range specified. * Invalidate the mapping because we're about to CoW. */ mod = offset_in_page(copy_pos); if (mod) { copy_len += mod; copy_pos -= mod; } mod = offset_in_page(copy_pos + copy_len); if (mod) copy_len += PAGE_SIZE - mod; invalidate_inode_pages2_range(iter->inode->i_mapping, copy_pos >> PAGE_SHIFT, (copy_pos + copy_len - 1) >> PAGE_SHIFT); id = dax_read_lock(); ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL); if (ret < 0) goto out_unlock; ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL); if (ret < 0) goto out_unlock; if (copy_mc_to_kernel(daddr, saddr, copy_len) == 0) ret = iomap_length(iter); else ret = -EIO; out_unlock: dax_read_unlock(id); return dax_mem2blk_err(ret); } int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX, }; loff_t size = i_size_read(inode); int ret; if (pos < 0 || pos >= size) return 0; iter.len = min(len, size - pos); while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = dax_unshare_iter(&iter); return ret; } EXPORT_SYMBOL_GPL(dax_file_unshare); static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size) { const struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); unsigned offset = offset_in_page(pos); pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); void *kaddr; long ret; ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL); if (ret < 0) return dax_mem2blk_err(ret); memset(kaddr + offset, 0, size); if (iomap->flags & IOMAP_F_SHARED) ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap, kaddr); else dax_flush(iomap->dax_dev, kaddr + offset, size); return ret; } static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero) { const struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t pos = iter->pos; u64 length = iomap_length(iter); s64 written = 0; /* already zeroed? we're done. */ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) return length; /* * invalidate the pages whose sharing state is to be changed * because of CoW. 
*/ if (iomap->flags & IOMAP_F_SHARED) invalidate_inode_pages2_range(iter->inode->i_mapping, pos >> PAGE_SHIFT, (pos + length - 1) >> PAGE_SHIFT); do { unsigned offset = offset_in_page(pos); unsigned size = min_t(u64, PAGE_SIZE - offset, length); pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); long rc; int id; id = dax_read_lock(); if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE) rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1); else rc = dax_memzero(iter, pos, size); dax_read_unlock(id); if (rc < 0) return rc; pos += size; length -= size; written += size; } while (length > 0); if (did_zero) *did_zero = true; return written; } int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .len = len, .flags = IOMAP_DAX | IOMAP_ZERO, }; int ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = dax_zero_iter(&iter, did_zero); return ret; } EXPORT_SYMBOL_GPL(dax_zero_range); int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops) { unsigned int blocksize = i_blocksize(inode); unsigned int off = pos & (blocksize - 1); /* Block boundary? Nothing to do */ if (!off) return 0; return dax_zero_range(inode, pos, blocksize - off, did_zero, ops); } EXPORT_SYMBOL_GPL(dax_truncate_page); static loff_t dax_iomap_iter(const struct iomap_iter *iomi, struct iov_iter *iter) { const struct iomap *iomap = &iomi->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iomi); loff_t length = iomap_length(iomi); loff_t pos = iomi->pos; struct dax_device *dax_dev = iomap->dax_dev; loff_t end = pos + length, done = 0; bool write = iov_iter_rw(iter) == WRITE; bool cow = write && iomap->flags & IOMAP_F_SHARED; ssize_t ret = 0; size_t xfer; int id; if (!write) { end = min(end, i_size_read(iomi->inode)); if (pos >= end) return 0; if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) return iov_iter_zero(min(length, end - pos), iter); } /* * In DAX mode, enforce either pure overwrites of written extents, or * writes to unwritten extents as part of a copy-on-write operation. */ if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED))) return -EIO; /* * Write can allocate block for an area which has a hole page mapped * into page tables. We have to tear down these mappings so that data * written by write(2) is visible in mmap. */ if (iomap->flags & IOMAP_F_NEW || cow) { /* * Filesystem allows CoW on non-shared extents. The src extents * may have been mmapped with dirty mark before. To be able to * invalidate its dax entries, we need to clear the dirty mark * in advance. 
*/ if (cow) __dax_clear_dirty_range(iomi->inode->i_mapping, pos >> PAGE_SHIFT, (end - 1) >> PAGE_SHIFT); invalidate_inode_pages2_range(iomi->inode->i_mapping, pos >> PAGE_SHIFT, (end - 1) >> PAGE_SHIFT); } id = dax_read_lock(); while (pos < end) { unsigned offset = pos & (PAGE_SIZE - 1); const size_t size = ALIGN(length + offset, PAGE_SIZE); pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); ssize_t map_len; bool recovery = false; void *kaddr; if (fatal_signal_pending(current)) { ret = -EINTR; break; } map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), DAX_ACCESS, &kaddr, NULL); if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) { map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), DAX_RECOVERY_WRITE, &kaddr, NULL); if (map_len > 0) recovery = true; } if (map_len < 0) { ret = dax_mem2blk_err(map_len); break; } if (cow) { ret = dax_iomap_copy_around(pos, length, PAGE_SIZE, srcmap, kaddr); if (ret) break; } map_len = PFN_PHYS(map_len); kaddr += offset; map_len -= offset; if (map_len > end - pos) map_len = end - pos; if (recovery) xfer = dax_recovery_write(dax_dev, pgoff, kaddr, map_len, iter); else if (write) xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, map_len, iter); else xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, map_len, iter); pos += xfer; length -= xfer; done += xfer; if (xfer == 0) ret = -EFAULT; if (xfer < map_len) break; } dax_read_unlock(id); return done ? done : ret; } /** * dax_iomap_rw - Perform I/O to a DAX file * @iocb: The control block for this I/O * @iter: The addresses to do I/O from or to * @ops: iomap ops passed from the file system * * This function performs read and write operations to directly mapped * persistent memory. The callers needs to take care of read/write exclusion * and evicting any page cache pages in the region under I/O. */ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops) { struct iomap_iter iomi = { .inode = iocb->ki_filp->f_mapping->host, .pos = iocb->ki_pos, .len = iov_iter_count(iter), .flags = IOMAP_DAX, }; loff_t done = 0; int ret; if (!iomi.len) return 0; if (iov_iter_rw(iter) == WRITE) { lockdep_assert_held_write(&iomi.inode->i_rwsem); iomi.flags |= IOMAP_WRITE; } else { lockdep_assert_held(&iomi.inode->i_rwsem); } if (iocb->ki_flags & IOCB_NOWAIT) iomi.flags |= IOMAP_NOWAIT; while ((ret = iomap_iter(&iomi, ops)) > 0) iomi.processed = dax_iomap_iter(&iomi, iter); done = iomi.pos - iocb->ki_pos; iocb->ki_pos = iomi.pos; return done ? done : ret; } EXPORT_SYMBOL_GPL(dax_iomap_rw); static vm_fault_t dax_fault_return(int error) { if (error == 0) return VM_FAULT_NOPAGE; return vmf_error(error); } /* * When handling a synchronous page fault and the inode need a fsync, we can * insert the PTE/PMD into page tables only after that fsync happened. Skip * insertion for now and return the pfn so that caller can insert it after the * fsync is done. 
*/ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn) { if (WARN_ON_ONCE(!pfnp)) return VM_FAULT_SIGBUS; *pfnp = pfn; return VM_FAULT_NEEDDSYNC; } static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, const struct iomap_iter *iter) { vm_fault_t ret; int error = 0; switch (iter->iomap.type) { case IOMAP_HOLE: case IOMAP_UNWRITTEN: clear_user_highpage(vmf->cow_page, vmf->address); break; case IOMAP_MAPPED: error = copy_cow_page_dax(vmf, iter); break; default: WARN_ON_ONCE(1); error = -EIO; break; } if (error) return dax_fault_return(error); __SetPageUptodate(vmf->cow_page); ret = finish_fault(vmf); if (!ret) return VM_FAULT_DONE_COW; return ret; } /** * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault. * @vmf: vm fault instance * @iter: iomap iter * @pfnp: pfn to be returned * @xas: the dax mapping tree of a file * @entry: an unlocked dax entry to be inserted * @pmd: distinguish whether it is a pmd fault */ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, const struct iomap_iter *iter, pfn_t *pfnp, struct xa_state *xas, void **entry, bool pmd) { const struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); size_t size = pmd ? PMD_SIZE : PAGE_SIZE; loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; bool write = iter->flags & IOMAP_WRITE; unsigned long entry_flags = pmd ? DAX_PMD : 0; int err = 0; pfn_t pfn; void *kaddr; if (!pmd && vmf->cow_page) return dax_fault_cow_page(vmf, iter); /* if we are reading UNWRITTEN and HOLE, return a hole. */ if (!write && (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) { if (!pmd) return dax_load_hole(xas, vmf, iter, entry); return dax_pmd_load_hole(xas, vmf, iter, entry); } if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) { WARN_ON_ONCE(1); return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; } err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn); if (err) return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err); *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); if (write && iomap->flags & IOMAP_F_SHARED) { err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr); if (err) return dax_fault_return(err); } if (dax_fault_is_synchronous(iter, vmf->vma)) return dax_fault_synchronous_pfnp(pfnp, pfn); /* insert PMD pfn */ if (pmd) return vmf_insert_pfn_pmd(vmf, pfn, write); /* insert PTE pfn */ if (write) return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); return vmf_insert_mixed(vmf->vma, vmf->address, pfn); } static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE(xas, &mapping->i_pages, vmf->pgoff); struct iomap_iter iter = { .inode = mapping->host, .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, .len = PAGE_SIZE, .flags = IOMAP_DAX | IOMAP_FAULT, }; vm_fault_t ret = 0; void *entry; int error; trace_dax_pte_fault(iter.inode, vmf, ret); /* * Check whether offset isn't beyond end of file now. Caller is supposed * to hold locks serializing us with truncate / punch hole so this is * a reliable test. 
*/ if (iter.pos >= i_size_read(iter.inode)) { ret = VM_FAULT_SIGBUS; goto out; } if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) iter.flags |= IOMAP_WRITE; entry = grab_mapping_entry(&xas, mapping, 0); if (xa_is_internal(entry)) { ret = xa_to_internal(entry); goto out; } /* * It is possible, particularly with mixed reads & writes to private * mappings, that we have raced with a PMD fault that overlaps with * the PTE we need to set up. If so just return and the fault will be * retried. */ if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { ret = VM_FAULT_NOPAGE; goto unlock_entry; } while ((error = iomap_iter(&iter, ops)) > 0) { if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) { iter.processed = -EIO; /* fs corruption? */ continue; } ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); if (ret != VM_FAULT_SIGBUS && (iter.iomap.flags & IOMAP_F_NEW)) { count_vm_event(PGMAJFAULT); count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); ret |= VM_FAULT_MAJOR; } if (!(ret & VM_FAULT_ERROR)) iter.processed = PAGE_SIZE; } if (iomap_errp) *iomap_errp = error; if (!ret && error) ret = dax_fault_return(error); unlock_entry: dax_unlock_entry(&xas, entry); out: trace_dax_pte_fault_done(iter.inode, vmf, ret); return ret; } #ifdef CONFIG_FS_DAX_PMD static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, pgoff_t max_pgoff) { unsigned long pmd_addr = vmf->address & PMD_MASK; bool write = vmf->flags & FAULT_FLAG_WRITE; /* * Make sure that the faulting address's PMD offset (color) matches * the PMD offset from the start of the file. This is necessary so * that a PMD range in the page table overlaps exactly with a PMD * range in the page cache. */ if ((vmf->pgoff & PG_PMD_COLOUR) != ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) return true; /* Fall back to PTEs if we're going to COW */ if (write && !(vmf->vma->vm_flags & VM_SHARED)) return true; /* If the PMD would extend outside the VMA */ if (pmd_addr < vmf->vma->vm_start) return true; if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) return true; /* If the PMD would extend beyond the file size */ if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) return true; return false; } static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, const struct iomap_ops *ops) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); struct iomap_iter iter = { .inode = mapping->host, .len = PMD_SIZE, .flags = IOMAP_DAX | IOMAP_FAULT, }; vm_fault_t ret = VM_FAULT_FALLBACK; pgoff_t max_pgoff; void *entry; if (vmf->flags & FAULT_FLAG_WRITE) iter.flags |= IOMAP_WRITE; /* * Check whether offset isn't beyond end of file now. Caller is * supposed to hold locks serializing us with truncate / punch hole so * this is a reliable test. */ max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE); trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); if (xas.xa_index >= max_pgoff) { ret = VM_FAULT_SIGBUS; goto out; } if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) goto fallback; /* * grab_mapping_entry() will make sure we get an empty PMD entry, * a zero PMD entry or a DAX PMD. If it can't (because a PTE * entry is already in the array, for instance), it will return * VM_FAULT_FALLBACK. 
*/ entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); if (xa_is_internal(entry)) { ret = xa_to_internal(entry); goto fallback; } /* * It is possible, particularly with mixed reads & writes to private * mappings, that we have raced with a PTE fault that overlaps with * the PMD we need to set up. If so just return and the fault will be * retried. */ if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && !pmd_devmap(*vmf->pmd)) { ret = 0; goto unlock_entry; } iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; while (iomap_iter(&iter, ops) > 0) { if (iomap_length(&iter) < PMD_SIZE) continue; /* actually breaks out of the loop */ ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); if (ret != VM_FAULT_FALLBACK) iter.processed = PMD_SIZE; } unlock_entry: dax_unlock_entry(&xas, entry); fallback: if (ret == VM_FAULT_FALLBACK) { split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); count_vm_event(THP_FAULT_FALLBACK); } out: trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); return ret; } #else static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, const struct iomap_ops *ops) { return VM_FAULT_FALLBACK; } #endif /* CONFIG_FS_DAX_PMD */ /** * dax_iomap_fault - handle a page fault on a DAX file * @vmf: The description of the fault * @order: Order of the page to fault in * @pfnp: PFN to insert for synchronous faults if fsync is required * @iomap_errp: Storage for detailed error code in case of error * @ops: Iomap ops passed from the file system * * When a page fault occurs, filesystems may call this helper in * their fault handler for DAX files. dax_iomap_fault() assumes the caller * has done all the necessary locking for page fault to proceed * successfully. */ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) { if (order == 0) return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); else if (order == PMD_ORDER) return dax_iomap_pmd_fault(vmf, pfnp, ops); else return VM_FAULT_FALLBACK; } EXPORT_SYMBOL_GPL(dax_iomap_fault); /* * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables * @vmf: The description of the fault * @pfn: PFN to insert * @order: Order of entry to insert. * * This function inserts a writeable PTE or PMD entry into the page tables * for an mmaped DAX file. It also marks the page cache entry as dirty. */ static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); void *entry; vm_fault_t ret; xas_lock_irq(&xas); entry = get_unlocked_entry(&xas, order); /* Did we race with someone splitting entry or so? 
*/ if (!entry || dax_is_conflict(entry) || (order == 0 && !dax_is_pte_entry(entry))) { put_unlocked_entry(&xas, entry, WAKE_NEXT); xas_unlock_irq(&xas); trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, VM_FAULT_NOPAGE); return VM_FAULT_NOPAGE; } xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); dax_lock_entry(&xas, entry); xas_unlock_irq(&xas); if (order == 0) ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); #ifdef CONFIG_FS_DAX_PMD else if (order == PMD_ORDER) ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); #endif else ret = VM_FAULT_FALLBACK; dax_unlock_entry(&xas, entry); trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); return ret; } /** * dax_finish_sync_fault - finish synchronous page fault * @vmf: The description of the fault * @order: Order of entry to be inserted * @pfn: PFN to insert * * This function ensures that the file range touched by the page fault is * stored persistently on the media and handles inserting of appropriate page * table entry. */ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, pfn_t pfn) { int err; loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; size_t len = PAGE_SIZE << order; err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); if (err) return VM_FAULT_SIGBUS; return dax_insert_pfn_mkwrite(vmf, pfn, order); } EXPORT_SYMBOL_GPL(dax_finish_sync_fault); static loff_t dax_range_compare_iter(struct iomap_iter *it_src, struct iomap_iter *it_dest, u64 len, bool *same) { const struct iomap *smap = &it_src->iomap; const struct iomap *dmap = &it_dest->iomap; loff_t pos1 = it_src->pos, pos2 = it_dest->pos; void *saddr, *daddr; int id, ret; len = min(len, min(smap->length, dmap->length)); if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) { *same = true; return len; } if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) { *same = false; return 0; } id = dax_read_lock(); ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE), &saddr, NULL); if (ret < 0) goto out_unlock; ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE), &daddr, NULL); if (ret < 0) goto out_unlock; *same = !memcmp(saddr, daddr, len); if (!*same) len = 0; dax_read_unlock(id); return len; out_unlock: dax_read_unlock(id); return -EIO; } int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, struct inode *dst, loff_t dstoff, loff_t len, bool *same, const struct iomap_ops *ops) { struct iomap_iter src_iter = { .inode = src, .pos = srcoff, .len = len, .flags = IOMAP_DAX, }; struct iomap_iter dst_iter = { .inode = dst, .pos = dstoff, .len = len, .flags = IOMAP_DAX, }; int ret, compared = 0; while ((ret = iomap_iter(&src_iter, ops)) > 0 && (ret = iomap_iter(&dst_iter, ops)) > 0) { compared = dax_range_compare_iter(&src_iter, &dst_iter, min(src_iter.len, dst_iter.len), same); if (compared < 0) return ret; src_iter.processed = dst_iter.processed = compared; } return ret; } int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *len, unsigned int remap_flags, const struct iomap_ops *ops) { return __generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, len, remap_flags, ops); } EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
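/*
 * Editor's illustration (not part of fs/dax.c): the dax_fault_is_synchronous()
 * path above implements the MAP_SYNC guarantee, i.e. a write fault on a
 * MAP_SYNC mapping does not complete until the filesystem metadata needed to
 * reach the faulted block is durable.  The userspace sketch below shows
 * roughly how an application relies on that; the file path, the 4 KiB size
 * and the use of msync() as a portable cache flush are assumptions, not part
 * of the kernel code (pmem-aware applications typically flush with CLWB
 * instead).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SYNC
#define MAP_SYNC		0x80000		/* from <linux/mman.h> */
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE	0x03
#endif

int main(void)
{
	/* Hypothetical file on a filesystem mounted with -o dax. */
	int fd = open("/mnt/pmem/log.bin", O_RDWR);
	if (fd < 0)
		return 1;

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED) {
		/* The kernel or filesystem cannot honour MAP_SYNC here. */
		perror("mmap");
		close(fd);
		return 1;
	}

	strcpy(p, "record");
	/*
	 * The allocating metadata was made durable at fault time, so only
	 * the CPU caches still need flushing; msync() is used here as a
	 * conservative, portable way to do that.
	 */
	msync(p, 4096, MS_SYNC);

	munmap(p, 4096);
	close(fd);
	return 0;
}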
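/*
 * Editor's sketch (not kernel code) of the alignment arithmetic used by
 * dax_iomap_copy_around() above: for an unaligned write, the head and tail
 * of the enclosing aligned block must be copied from the source mapping (or
 * zeroed) so that the whole aligned range ends up consistent.  The example
 * values for pos, length and align_size are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t align_size = 4096;		/* the PAGE_SIZE case */
	uint64_t pos = 5000, length = 300;	/* unaligned write */

	uint64_t head_off = pos & (align_size - 1);
	uint64_t size = ALIGN_UP(head_off + length, align_size);
	uint64_t end = pos + length;
	uint64_t pg_end = ALIGN_UP(end, align_size);
	int copy_all = (head_off == 0 && end == pg_end);

	printf("copy_all=%d, head: %llu bytes copied before the write\n",
	       copy_all, (unsigned long long)head_off);
	printf("tail: %llu bytes copied after the write (daddr offset %llu)\n",
	       (unsigned long long)(pg_end - end),
	       (unsigned long long)(head_off + length));
	printf("aligned size handled in total: %llu bytes\n",
	       (unsigned long long)size);
	return 0;
}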
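/*
 * Editor's sketch of the "PMD colour" test performed by
 * dax_fault_check_fallback() above.  A PMD-sized DAX mapping can only be
 * used when the file offset and the faulting virtual address are misaligned
 * from a PMD boundary by the same number of pages.  The page and PMD sizes
 * below are the common x86-64 values and are assumptions of this example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(2UL << 20)				/* 2 MiB */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)		/* 511 */

static bool pmd_fault_possible(uint64_t pgoff, uint64_t address)
{
	return (pgoff & PG_PMD_COLOUR) ==
	       ((address >> PAGE_SHIFT) & PG_PMD_COLOUR);
}

int main(void)
{
	/* Same colour: file offset 0 mapped at a 2 MiB aligned address. */
	printf("%d\n", pmd_fault_possible(0, 0x7f0000200000ULL));
	/* Different colour: file offset 4 KiB at the same aligned address. */
	printf("%d\n", pmd_fault_possible(1, 0x7f0000200000ULL));
	return 0;
}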
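/*
 * Editor's illustration (not part of the file below): the UBI ioctl handlers
 * defined in the following character-device code are what tools such as
 * ubimkvol ultimately talk to, and they require CAP_SYS_RESOURCE.  This
 * sketch creates a dynamic volume through UBI_IOCMKVOL and lets UBI pick the
 * volume ID; the device path /dev/ubi0, the size and the name are assumptions
 * made for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	struct ubi_mkvol_req req;
	int fd = open("/dev/ubi0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.vol_id = UBI_VOL_NUM_AUTO;		/* let UBI choose the ID */
	req.alignment = 1;
	req.bytes = 1024 * 1024;		/* 1 MiB volume */
	req.vol_type = UBI_DYNAMIC_VOLUME;
	strcpy(req.name, "example");
	req.name_len = strlen(req.name);

	if (ioctl(fd, UBI_IOCMKVOL, &req) < 0) {
		perror("UBI_IOCMKVOL");
		close(fd);
		return 1;
	}
	/* On success the kernel writes the assigned ID back into vol_id. */
	printf("created volume %d\n", req.vol_id);
	close(fd);
	return 0;
}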
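/*
 * Editor's illustration of the volume-update protocol implemented by the
 * UBI_IOCVOLUP handler and vol_cdev_write() below: announce the total number
 * of bytes with the ioctl (which also needs CAP_SYS_RESOURCE), then write
 * exactly that many bytes to the volume character device; the last byte
 * completes the update via the ubi_more_update_data() path.  The device path
 * and the payload are assumptions.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	static const char image[] = "new volume contents";
	int64_t bytes = sizeof(image);
	int fd = open("/dev/ubi0_0", O_RDWR);	/* assumed volume node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Start the update; the volume is now in "updating" state. */
	if (ioctl(fd, UBI_IOCVOLUP, &bytes) < 0) {
		perror("UBI_IOCVOLUP");
		close(fd);
		return 1;
	}

	/* Feed the announced number of bytes (may be split across writes). */
	if (write(fd, image, sizeof(image)) != (ssize_t)sizeof(image)) {
		perror("write");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}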
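/*
 * Editor's sketch of the offset arithmetic used by vol_cdev_read() and
 * vol_cdev_direct_write() below: a byte offset into the volume is split into
 * a logical eraseblock number and an offset inside that LEB, which is what
 * div_u64_rem() computes in the kernel.  The usable LEB size is an example
 * value (128 KiB PEB minus two 2 KiB header pages).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t usable_leb_size = 126976;	/* example value */
	uint64_t offp = 300000;			/* byte offset into the volume */

	uint64_t lnum = offp / usable_leb_size;
	uint32_t off = (uint32_t)(offp % usable_leb_size);

	printf("offset %llu -> LEB %llu, offset %u within that LEB\n",
	       (unsigned long long)offp, (unsigned long long)lnum, off);
	return 0;
}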
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file includes implementation of UBI character device operations.
 *
 * There are two kinds of character devices in UBI: UBI character devices and
 * UBI volume character devices. UBI character devices allow users to
 * manipulate whole volumes: create, remove, and re-size them. Volume character
 * devices provide volume I/O capabilities.
 *
 * Major and minor numbers are assigned dynamically to both UBI and volume
 * character devices.
 *
 * There is also a third kind of character device - the UBI control character
 * device, which allows one to manipulate UBI devices - create and delete
 * them. In other words, it is used for attaching and detaching MTD devices.
 */

#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"

/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * This function changes UBI volume open mode to "exclusive". Returns previous
 * mode value (positive integer) in case of success and a negative error code
 * in case of failure.
 */
static int get_exclusive(struct ubi_volume_desc *desc)
{
	int users, err;
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;
	ubi_assert(users > 0);
	if (users > 1) {
		ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
		err = -EBUSY;
	} else {
		vol->readers = vol->writers = vol->metaonly = 0;
		vol->exclusive = 1;
		err = desc->mode;
		desc->mode = UBI_EXCLUSIVE;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	return err;
}

/**
 * revoke_exclusive - revoke exclusive mode.
* @desc: volume descriptor * @mode: new mode to switch to */ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode) { struct ubi_volume *vol = desc->vol; spin_lock(&vol->ubi->volumes_lock); ubi_assert(vol->readers == 0 && vol->writers == 0 && vol->metaonly == 0); ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE); vol->exclusive = 0; if (mode == UBI_READONLY) vol->readers = 1; else if (mode == UBI_READWRITE) vol->writers = 1; else if (mode == UBI_METAONLY) vol->metaonly = 1; else vol->exclusive = 1; spin_unlock(&vol->ubi->volumes_lock); desc->mode = mode; } static int vol_cdev_open(struct inode *inode, struct file *file) { struct ubi_volume_desc *desc; int vol_id = iminor(inode) - 1, mode, ubi_num; ubi_num = ubi_major2num(imajor(inode)); if (ubi_num < 0) return ubi_num; if (file->f_mode & FMODE_WRITE) mode = UBI_READWRITE; else mode = UBI_READONLY; dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode); desc = ubi_open_volume(ubi_num, vol_id, mode); if (IS_ERR(desc)) return PTR_ERR(desc); file->private_data = desc; return 0; } static int vol_cdev_release(struct inode *inode, struct file *file) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; dbg_gen("release device %d, volume %d, mode %d", vol->ubi->ubi_num, vol->vol_id, desc->mode); if (vol->updating) { ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged", vol->vol_id); ubi_assert(!vol->changing_leb); vol->updating = 0; vfree(vol->upd_buf); } else if (vol->changing_leb) { dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel", vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); vol->changing_leb = 0; vfree(vol->upd_buf); } ubi_close_volume(desc); return 0; } static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; if (vol->updating) { /* Update is in progress, seeking is prohibited */ ubi_err(vol->ubi, "updating"); return -EBUSY; } return fixed_size_llseek(file, offset, origin, vol->used_bytes); } static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct ubi_volume_desc *desc = file->private_data; struct ubi_device *ubi = desc->vol->ubi; struct inode *inode = file_inode(file); int err; inode_lock(inode); err = ubi_sync(ubi->ubi_num); inode_unlock(inode); return err; } static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, loff_t *offp) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int err, lnum, off, len, tbuf_size; size_t count_save = count; void *tbuf; dbg_gen("read %zd bytes from offset %lld of volume %d", count, *offp, vol->vol_id); if (vol->updating) { ubi_err(vol->ubi, "updating"); return -EBUSY; } if (vol->upd_marker) { ubi_err(vol->ubi, "damaged volume, update marker is set"); return -EBADF; } if (*offp == vol->used_bytes || count == 0) return 0; if (vol->corrupted) dbg_gen("read from corrupted volume %d", vol->vol_id); if (*offp + count > vol->used_bytes) count_save = count = vol->used_bytes - *offp; tbuf_size = vol->usable_leb_size; if (count < tbuf_size) tbuf_size = ALIGN(count, ubi->min_io_size); tbuf = vmalloc(tbuf_size); if (!tbuf) return -ENOMEM; len = count > tbuf_size ? 
tbuf_size : count; lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); do { cond_resched(); if (off + len >= vol->usable_leb_size) len = vol->usable_leb_size - off; err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0); if (err) break; off += len; if (off == vol->usable_leb_size) { lnum += 1; off -= vol->usable_leb_size; } count -= len; *offp += len; err = copy_to_user(buf, tbuf, len); if (err) { err = -EFAULT; break; } buf += len; len = count > tbuf_size ? tbuf_size : count; } while (count); vfree(tbuf); return err ? err : count_save - count; } /* * This function allows to directly write to dynamic UBI volumes, without * issuing the volume update operation. */ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, size_t count, loff_t *offp) { struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; int lnum, off, len, tbuf_size, err = 0; size_t count_save = count; char *tbuf; if (!vol->direct_writes) return -EPERM; dbg_gen("requested: write %zd bytes to offset %lld of volume %u", count, *offp, vol->vol_id); if (vol->vol_type == UBI_STATIC_VOLUME) return -EROFS; lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); if (off & (ubi->min_io_size - 1)) { ubi_err(ubi, "unaligned position"); return -EINVAL; } if (*offp + count > vol->used_bytes) count_save = count = vol->used_bytes - *offp; /* We can write only in fractions of the minimum I/O unit */ if (count & (ubi->min_io_size - 1)) { ubi_err(ubi, "unaligned write length"); return -EINVAL; } tbuf_size = vol->usable_leb_size; if (count < tbuf_size) tbuf_size = ALIGN(count, ubi->min_io_size); tbuf = vmalloc(tbuf_size); if (!tbuf) return -ENOMEM; len = count > tbuf_size ? tbuf_size : count; while (count) { cond_resched(); if (off + len >= vol->usable_leb_size) len = vol->usable_leb_size - off; err = copy_from_user(tbuf, buf, len); if (err) { err = -EFAULT; break; } err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len); if (err) break; off += len; if (off == vol->usable_leb_size) { lnum += 1; off -= vol->usable_leb_size; } count -= len; *offp += len; buf += len; len = count > tbuf_size ? tbuf_size : count; } vfree(tbuf); return err ? err : count_save - count; } static ssize_t vol_cdev_write(struct file *file, const char __user *buf, size_t count, loff_t *offp) { int err = 0; struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; if (!vol->updating && !vol->changing_leb) return vol_cdev_direct_write(file, buf, count, offp); if (vol->updating) err = ubi_more_update_data(ubi, vol, buf, count); else err = ubi_more_leb_change_data(ubi, vol, buf, count); if (err < 0) { ubi_err(ubi, "cannot accept more %zd bytes of data, error %d", count, err); return err; } if (err) { /* * The operation is finished, @err contains number of actually * written bytes. */ count = err; if (vol->changing_leb) { revoke_exclusive(desc, UBI_READWRITE); return count; } /* * We voluntarily do not take into account the skip_check flag * as we want to make sure what we wrote was correctly written. 
*/ err = ubi_check_volume(ubi, vol->vol_id); if (err < 0) return err; if (err) { ubi_warn(ubi, "volume %d on UBI device %d is corrupted", vol->vol_id, ubi->ubi_num); vol->corrupted = 1; } vol->checked = 1; ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED); revoke_exclusive(desc, UBI_READWRITE); } return count; } static long vol_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = 0; struct ubi_volume_desc *desc = file->private_data; struct ubi_volume *vol = desc->vol; struct ubi_device *ubi = vol->ubi; void __user *argp = (void __user *)arg; switch (cmd) { /* Volume update command */ case UBI_IOCVOLUP: { int64_t bytes, rsvd_bytes; if (!capable(CAP_SYS_RESOURCE)) { err = -EPERM; break; } err = copy_from_user(&bytes, argp, sizeof(int64_t)); if (err) { err = -EFAULT; break; } if (desc->mode == UBI_READONLY) { err = -EROFS; break; } rsvd_bytes = (long long)vol->reserved_pebs * vol->usable_leb_size; if (bytes < 0 || bytes > rsvd_bytes) { err = -EINVAL; break; } err = get_exclusive(desc); if (err < 0) break; err = ubi_start_update(ubi, vol, bytes); if (bytes == 0) { ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED); revoke_exclusive(desc, UBI_READWRITE); } break; } /* Atomic logical eraseblock change command */ case UBI_IOCEBCH: { struct ubi_leb_change_req req; err = copy_from_user(&req, argp, sizeof(struct ubi_leb_change_req)); if (err) { err = -EFAULT; break; } if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) { err = -EROFS; break; } /* Validate the request */ err = -EINVAL; if (!ubi_leb_valid(vol, req.lnum) || req.bytes < 0 || req.bytes > vol->usable_leb_size) break; err = get_exclusive(desc); if (err < 0) break; err = ubi_start_leb_change(ubi, vol, &req); if (req.bytes == 0) revoke_exclusive(desc, UBI_READWRITE); break; } /* Logical eraseblock erasure command */ case UBI_IOCEBER: { int32_t lnum; err = get_user(lnum, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) { err = -EROFS; break; } if (!ubi_leb_valid(vol, lnum)) { err = -EINVAL; break; } dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); err = ubi_eba_unmap_leb(ubi, vol, lnum); if (err) break; err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); break; } /* Logical eraseblock map command */ case UBI_IOCEBMAP: { struct ubi_map_req req; err = copy_from_user(&req, argp, sizeof(struct ubi_map_req)); if (err) { err = -EFAULT; break; } err = ubi_leb_map(desc, req.lnum); break; } /* Logical eraseblock un-map command */ case UBI_IOCEBUNMAP: { int32_t lnum; err = get_user(lnum, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } err = ubi_leb_unmap(desc, lnum); break; } /* Check if logical eraseblock is mapped command */ case UBI_IOCEBISMAP: { int32_t lnum; err = get_user(lnum, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } err = ubi_is_mapped(desc, lnum); break; } /* Set volume property command */ case UBI_IOCSETVOLPROP: { struct ubi_set_vol_prop_req req; err = copy_from_user(&req, argp, sizeof(struct ubi_set_vol_prop_req)); if (err) { err = -EFAULT; break; } switch (req.property) { case UBI_VOL_PROP_DIRECT_WRITE: mutex_lock(&ubi->device_mutex); desc->vol->direct_writes = !!req.value; mutex_unlock(&ubi->device_mutex); break; default: err = -EINVAL; break; } break; } /* Create a R/O block device on top of the UBI volume */ case UBI_IOCVOLCRBLK: { struct ubi_volume_info vi; ubi_get_volume_info(desc, &vi); err = ubiblock_create(&vi); break; } /* Remove the R/O block device */ case UBI_IOCVOLRMBLK: { struct 
ubi_volume_info vi; ubi_get_volume_info(desc, &vi); err = ubiblock_remove(&vi); break; } default: err = -ENOTTY; break; } return err; } /** * verify_mkvol_req - verify volume creation request. * @ubi: UBI device description object * @req: the request to check * * This function zero if the request is correct, and %-EINVAL if not. */ static int verify_mkvol_req(const struct ubi_device *ubi, const struct ubi_mkvol_req *req) { int n, err = -EINVAL; if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 || req->name_len < 0) goto bad; if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) && req->vol_id != UBI_VOL_NUM_AUTO) goto bad; if (req->alignment == 0) goto bad; if (req->bytes == 0) goto bad; if (req->vol_type != UBI_DYNAMIC_VOLUME && req->vol_type != UBI_STATIC_VOLUME) goto bad; if (req->flags & ~UBI_VOL_VALID_FLGS) goto bad; if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG && req->vol_type != UBI_STATIC_VOLUME) goto bad; if (req->alignment > ubi->leb_size) goto bad; n = req->alignment & (ubi->min_io_size - 1); if (req->alignment != 1 && n) goto bad; if (!req->name[0] || !req->name_len) goto bad; if (req->name_len > UBI_VOL_NAME_MAX) { err = -ENAMETOOLONG; goto bad; } n = strnlen(req->name, req->name_len + 1); if (n != req->name_len) goto bad; return 0; bad: ubi_err(ubi, "bad volume creation request"); ubi_dump_mkvol_req(req); return err; } /** * verify_rsvol_req - verify volume re-size request. * @ubi: UBI device description object * @req: the request to check * * This function returns zero if the request is correct, and %-EINVAL if not. */ static int verify_rsvol_req(const struct ubi_device *ubi, const struct ubi_rsvol_req *req) { if (req->bytes <= 0) return -EINVAL; if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) return -EINVAL; return 0; } /** * rename_volumes - rename UBI volumes. * @ubi: UBI device description object * @req: volumes re-name request * * This is a helper function for the volume re-name IOCTL which validates the * request, opens the volume and calls corresponding volumes management * function. Returns zero in case of success and a negative error code in case * of failure. 
*/ static int rename_volumes(struct ubi_device *ubi, struct ubi_rnvol_req *req) { int i, n, err; struct list_head rename_list; struct ubi_rename_entry *re, *re1; if (req->count < 0 || req->count > UBI_MAX_RNVOL) return -EINVAL; if (req->count == 0) return 0; /* Validate volume IDs and names in the request */ for (i = 0; i < req->count; i++) { if (req->ents[i].vol_id < 0 || req->ents[i].vol_id >= ubi->vtbl_slots) return -EINVAL; if (req->ents[i].name_len < 0) return -EINVAL; if (req->ents[i].name_len > UBI_VOL_NAME_MAX) return -ENAMETOOLONG; req->ents[i].name[req->ents[i].name_len] = '\0'; n = strlen(req->ents[i].name); if (n != req->ents[i].name_len) return -EINVAL; } /* Make sure volume IDs and names are unique */ for (i = 0; i < req->count - 1; i++) { for (n = i + 1; n < req->count; n++) { if (req->ents[i].vol_id == req->ents[n].vol_id) { ubi_err(ubi, "duplicated volume id %d", req->ents[i].vol_id); return -EINVAL; } if (!strcmp(req->ents[i].name, req->ents[n].name)) { ubi_err(ubi, "duplicated volume name \"%s\"", req->ents[i].name); return -EINVAL; } } } /* Create the re-name list */ INIT_LIST_HEAD(&rename_list); for (i = 0; i < req->count; i++) { int vol_id = req->ents[i].vol_id; int name_len = req->ents[i].name_len; const char *name = req->ents[i].name; re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); if (!re) { err = -ENOMEM; goto out_free; } re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY); if (IS_ERR(re->desc)) { err = PTR_ERR(re->desc); ubi_err(ubi, "cannot open volume %d, error %d", vol_id, err); kfree(re); goto out_free; } /* Skip this re-naming if the name does not really change */ if (re->desc->vol->name_len == name_len && !memcmp(re->desc->vol->name, name, name_len)) { ubi_close_volume(re->desc); kfree(re); continue; } re->new_name_len = name_len; memcpy(re->new_name, name, name_len); list_add_tail(&re->list, &rename_list); dbg_gen("will rename volume %d from \"%s\" to \"%s\"", vol_id, re->desc->vol->name, name); } if (list_empty(&rename_list)) return 0; /* Find out the volumes which have to be removed */ list_for_each_entry(re, &rename_list, list) { struct ubi_volume_desc *desc; int no_remove_needed = 0; /* * Volume @re->vol_id is going to be re-named to * @re->new_name, while its current name is @name. If a volume * with name @re->new_name currently exists, it has to be * removed, unless it is also re-named in the request (@req). */ list_for_each_entry(re1, &rename_list, list) { if (re->new_name_len == re1->desc->vol->name_len && !memcmp(re->new_name, re1->desc->vol->name, re1->desc->vol->name_len)) { no_remove_needed = 1; break; } } if (no_remove_needed) continue; /* * It seems we need to remove volume with name @re->new_name, * if it exists. 
*/ desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); if (IS_ERR(desc)) { err = PTR_ERR(desc); if (err == -ENODEV) /* Re-naming into a non-existing volume name */ continue; /* The volume exists but busy, or an error occurred */ ubi_err(ubi, "cannot open volume \"%s\", error %d", re->new_name, err); goto out_free; } re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); if (!re1) { err = -ENOMEM; ubi_close_volume(desc); goto out_free; } re1->remove = 1; re1->desc = desc; list_add(&re1->list, &rename_list); dbg_gen("will remove volume %d, name \"%s\"", re1->desc->vol->vol_id, re1->desc->vol->name); } mutex_lock(&ubi->device_mutex); err = ubi_rename_volumes(ubi, &rename_list); mutex_unlock(&ubi->device_mutex); out_free: list_for_each_entry_safe(re, re1, &rename_list, list) { ubi_close_volume(re->desc); list_del(&re->list); kfree(re); } return err; } static int ubi_get_ec_info(struct ubi_device *ubi, struct ubi_ecinfo_req __user *ureq) { struct ubi_ecinfo_req req; struct ubi_wl_entry *wl; int read_cnt; int peb; int end_peb; /* Copy the input arguments */ if (copy_from_user(&req, ureq, sizeof(struct ubi_ecinfo_req))) return -EFAULT; /* Check input arguments */ if (req.length <= 0 || req.start < 0 || req.start >= ubi->peb_count) return -EINVAL; if (check_add_overflow(req.start, req.length, &end_peb)) return -EINVAL; if (end_peb > ubi->peb_count) end_peb = ubi->peb_count; /* Check access rights before filling erase_counters array */ if (!access_ok((void __user *)ureq->erase_counters, (end_peb-req.start) * sizeof(int32_t))) return -EFAULT; /* Fill erase counter array */ read_cnt = 0; for (peb = req.start; peb < end_peb; read_cnt++, peb++) { int ec; if (ubi_io_is_bad(ubi, peb)) { if (__put_user(UBI_UNKNOWN, ureq->erase_counters+read_cnt)) return -EFAULT; continue; } spin_lock(&ubi->wl_lock); wl = ubi->lookuptbl[peb]; if (wl) ec = wl->ec; else ec = UBI_UNKNOWN; spin_unlock(&ubi->wl_lock); if (__put_user(ec, ureq->erase_counters+read_cnt)) return -EFAULT; } /* Return actual read length */ req.read_length = read_cnt; /* Copy everything except erase counter array */ if (copy_to_user(ureq, &req, sizeof(struct ubi_ecinfo_req))) return -EFAULT; return 0; } static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = 0; struct ubi_device *ubi; struct ubi_volume_desc *desc; void __user *argp = (void __user *)arg; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; ubi = ubi_get_by_major(imajor(file->f_mapping->host)); if (!ubi) return -ENODEV; switch (cmd) { /* Create volume command */ case UBI_IOCMKVOL: { struct ubi_mkvol_req req; dbg_gen("create volume"); err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); if (err) { err = -EFAULT; break; } err = verify_mkvol_req(ubi, &req); if (err) break; mutex_lock(&ubi->device_mutex); err = ubi_create_volume(ubi, &req); mutex_unlock(&ubi->device_mutex); if (err) break; err = put_user(req.vol_id, (__user int32_t *)argp); if (err) err = -EFAULT; break; } /* Remove volume command */ case UBI_IOCRMVOL: { int vol_id; dbg_gen("remove volume"); err = get_user(vol_id, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); if (IS_ERR(desc)) { err = PTR_ERR(desc); break; } mutex_lock(&ubi->device_mutex); err = ubi_remove_volume(desc, 0); mutex_unlock(&ubi->device_mutex); /* * The volume is deleted (unless an error occurred), and the * 'struct ubi_volume' object will be freed when * 'ubi_close_volume()' will call 'put_device()'. 
*/ ubi_close_volume(desc); break; } /* Re-size volume command */ case UBI_IOCRSVOL: { int pebs; struct ubi_rsvol_req req; dbg_gen("re-size volume"); err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); if (err) { err = -EFAULT; break; } err = verify_rsvol_req(ubi, &req); if (err) break; desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE); if (IS_ERR(desc)) { err = PTR_ERR(desc); break; } pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, desc->vol->usable_leb_size); mutex_lock(&ubi->device_mutex); err = ubi_resize_volume(desc, pebs); mutex_unlock(&ubi->device_mutex); ubi_close_volume(desc); break; } /* Re-name volumes command */ case UBI_IOCRNVOL: { struct ubi_rnvol_req *req; dbg_gen("re-name volumes"); req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL); if (!req) { err = -ENOMEM; break; } err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req)); if (err) { err = -EFAULT; kfree(req); break; } err = rename_volumes(ubi, req); kfree(req); break; } /* Check a specific PEB for bitflips and scrub it if needed */ case UBI_IOCRPEB: { int pnum; err = get_user(pnum, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } err = ubi_bitflip_check(ubi, pnum, 0); break; } /* Force scrubbing for a specific PEB */ case UBI_IOCSPEB: { int pnum; err = get_user(pnum, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } err = ubi_bitflip_check(ubi, pnum, 1); break; } case UBI_IOCECNFO: { err = ubi_get_ec_info(ubi, argp); break; } default: err = -ENOTTY; break; } ubi_put_device(ubi); return err; } static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = 0; void __user *argp = (void __user *)arg; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; switch (cmd) { /* Attach an MTD device command */ case UBI_IOCATT: { struct ubi_attach_req req; struct mtd_info *mtd; dbg_gen("attach MTD device"); err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); if (err) { err = -EFAULT; break; } if (req.mtd_num < 0 || (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) { err = -EINVAL; break; } mtd = get_mtd_device(NULL, req.mtd_num); if (IS_ERR(mtd)) { err = PTR_ERR(mtd); break; } /* * Note, further request verification is done by * 'ubi_attach_mtd_dev()'. 
*/ mutex_lock(&ubi_devices_mutex); err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset, req.max_beb_per1024, !!req.disable_fm, !!req.need_resv_pool); mutex_unlock(&ubi_devices_mutex); if (err < 0) put_mtd_device(mtd); else /* @err contains UBI device number */ err = put_user(err, (__user int32_t *)argp); break; } /* Detach an MTD device command */ case UBI_IOCDET: { int ubi_num; dbg_gen("detach MTD device"); err = get_user(ubi_num, (__user int32_t *)argp); if (err) { err = -EFAULT; break; } mutex_lock(&ubi_devices_mutex); err = ubi_detach_mtd_dev(ubi_num, 0); mutex_unlock(&ubi_devices_mutex); break; } default: err = -ENOTTY; break; } return err; } /* UBI volume character device operations */ const struct file_operations ubi_vol_cdev_operations = { .owner = THIS_MODULE, .open = vol_cdev_open, .release = vol_cdev_release, .llseek = vol_cdev_llseek, .read = vol_cdev_read, .write = vol_cdev_write, .fsync = vol_cdev_fsync, .unlocked_ioctl = vol_cdev_ioctl, .compat_ioctl = compat_ptr_ioctl, }; /* UBI character device operations */ const struct file_operations ubi_cdev_operations = { .owner = THIS_MODULE, .unlocked_ioctl = ubi_cdev_ioctl, .compat_ioctl = compat_ptr_ioctl, }; /* UBI control character device operations */ const struct file_operations ubi_ctrl_cdev_operations = { .owner = THIS_MODULE, .unlocked_ioctl = ctrl_cdev_ioctl, .compat_ioctl = compat_ptr_ioctl, };
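For reference, the volume ioctls handled above are normally driven from user space through the per-volume character device. Below is a minimal sketch, assuming a hypothetical volume node /dev/ubi0_0, of issuing the LEB-erase command that vol_cdev_ioctl() services as UBI_IOCEBER; only the ioctl number and its int32_t argument come from the UAPI header, everything else is illustrative.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
	int32_t lnum = 0;                     /* LEB number to erase */
	int fd = open("/dev/ubi0_0", O_RDWR); /* hypothetical volume node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Kernel side: the UBI_IOCEBER case in vol_cdev_ioctl() above. */
	if (ioctl(fd, UBI_IOCEBER, &lnum))
		perror("UBI_IOCEBER");
	close(fd);
	return 0;
}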
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for some logitech "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2010 Hendrik Iben */ /* */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/usb.h> #include <linux/wait.h> #include "usbhid/usbhid.h" #include "hid-ids.h" #include "hid-lg.h" #include "hid-lg4ff.h" #define LG_RDESC 0x001 #define LG_BAD_RELATIVE_KEYS 0x002 #define LG_DUPLICATE_USAGES 0x004 #define LG_EXPANDED_KEYMAP 0x010 #define LG_IGNORE_DOUBLED_WHEEL 0x020 #define LG_WIRELESS 0x040 #define LG_INVERT_HWHEEL 0x080 #define LG_NOGET 0x100 #define LG_FF 0x200 #define LG_FF2 0x400 #define LG_RDESC_REL_ABS 0x800 #define LG_FF3 0x1000 #define LG_FF4 0x2000 /* Size of the original descriptors of the Driving Force (and Pro) wheels */ #define DF_RDESC_ORIG_SIZE 130 #define DFP_RDESC_ORIG_SIZE 97 #define FV_RDESC_ORIG_SIZE 130 #define MOMO_RDESC_ORIG_SIZE 87 #define MOMO2_RDESC_ORIG_SIZE 87 #define FFG_RDESC_ORIG_SIZE 85 #define FG_RDESC_ORIG_SIZE 82 /* Fixed report descriptors for Logitech Driving Force (and Pro) * wheel controllers * * The original descriptors hide the separate throttle and brake axes in * a custom vendor usage page, providing only a combined value as * GenericDesktop.Y. * These descriptors remove the combined Y axis and instead report * separate throttle (Y) and brake (RZ).
*/ static const __u8 df_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x14, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x34, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0C, /* Report Count (12), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage (Buttons), */ 0x19, 0x01, /* Usage Minimum (1), */ 0x29, 0x0c, /* Usage Maximum (12), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */ 0x09, 0x01, /* Usage (?: 1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x02, /* Input (Variable), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x75, 0x04, /* Report Size (4), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, Null State), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x04, /* Report Count (4), */ 0x65, 0x00, /* Unit (none), */ 0x06, 0x00, 0xFF, /* Usage Page (Vendor: 65280), */ 0x09, 0x01, /* Usage (?: 1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x35, /* Usage (Rz), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x07, /* Report Count (7), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x03, /* Usage (?: 3), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 dfp_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0E, /* Report Size (14), */ 0x14, /* Logical Minimum (0), */ 0x26, 0xFF, 0x3F, /* Logical Maximum (16383), */ 0x34, /* Physical Minimum (0), */ 0x46, 0xFF, 0x3F, /* Physical Maximum (16383), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0E, /* Report Count (14), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0E, /* Usage Maximum (0Eh), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x04, /* Report Size (4), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, 
Nullstate), */ 0x65, 0x00, /* Unit, */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x35, /* Usage (Rz), */ 0x81, 0x02, /* Input (Variable), */ 0x81, 0x01, /* Input (Constant), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 fv_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0C, /* Report Count (12), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0C, /* Usage Maximum (0Ch), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x02, /* Report Count (2), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x02, /* Usage (02h), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x25, 0x07, /* Logical Maximum (7), */ 0x46, 0x3B, 0x01, /* Physical Maximum (315), */ 0x75, 0x04, /* Report Size (4), */ 0x65, 0x14, /* Unit (Degrees), */ 0x09, 0x39, /* Usage (Hat Switch), */ 0x81, 0x42, /* Input (Variable, Null State), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x04, /* Report Count (4), */ 0x65, 0x00, /* Unit, */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x95, 0x07, /* Report Count (7), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x03, /* Usage (03h), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 momo_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* 
Input (Variable), */ 0x95, 0x08, /* Report Count (8), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x08, /* Usage Maximum (08h), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x75, 0x0E, /* Report Size (14), */ 0x95, 0x01, /* Report Count (1), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x00, /* Usage (00h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x08, /* Report Size (8), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 momo2_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x0A, /* Report Count (10), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x0A, /* Usage Maximum (0Ah), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x00, /* Usage (00h), */ 0x95, 0x04, /* Report Count (4), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x00, /* Usage (00h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 ffg_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x0A, /* Report Size (10), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x06, /* Report Count (6), */ 0x75, 0x01, /* Report Size (1), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), 
*/ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x06, /* Usage Maximum (06h), */ 0x81, 0x02, /* Input (Variable), */ 0x95, 0x01, /* Report Count (1), */ 0x75, 0x08, /* Report Size (8), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x81, 0x01, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x09, 0x01, /* Usage (01h), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x09, 0x02, /* Usage (02h), */ 0x95, 0x07, /* Report Count (7), */ 0x91, 0x02, /* Output (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const __u8 fg_rdesc_fixed[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystik), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), */ 0xA4, /* Push, */ 0x25, 0x01, /* Logical Maximum (1), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x02, /* Report Count (2), */ 0x81, 0x01, /* Input (Constant), */ 0x95, 0x06, /* Report Count (6), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x06, /* Usage Maximum (06h), */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0xB4, /* Pop, */ 0x81, 0x02, /* Input (Constant), */ 0x09, 0x31, /* Usage (Y), */ 0x81, 0x02, /* Input (Variable), */ 0x09, 0x32, /* Usage (Z), */ 0x81, 0x02, /* Input (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x04, /* Report Count (4), */ 0x09, 0x02, /* Usage (02h), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection, */ }; /* * Certain Logitech keyboards send in report #3 keys which are far * above the logical maximum described in descriptor. 
This extends * the original value of 0x28c of logical maximum to 0x104d */ static const __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { hid_info(hdev, "fixing up Logitech keyboard report descriptor\n"); rdesc[84] = rdesc[89] = 0x4d; rdesc[85] = rdesc[90] = 0x10; } if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 && rdesc[32] == 0x81 && rdesc[33] == 0x06 && rdesc[49] == 0x81 && rdesc[50] == 0x06) { hid_info(hdev, "fixing up rel/abs in Logitech report descriptor\n"); rdesc[33] = rdesc[50] = 0x02; } switch (hdev->product) { case USB_DEVICE_ID_LOGITECH_WINGMAN_FG: if (*rsize == FG_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Wingman Formula GP report descriptor\n"); *rsize = sizeof(fg_rdesc_fixed); return fg_rdesc_fixed; } else { hid_info(hdev, "rdesc size test failed for formula gp\n"); } break; case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG: if (*rsize == FFG_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Wingman Formula Force GP report descriptor\n"); *rsize = sizeof(ffg_rdesc_fixed); return ffg_rdesc_fixed; } break; /* Several wheels report as this id when operating in emulation mode. */ case USB_DEVICE_ID_LOGITECH_WHEEL: if (*rsize == DF_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force report descriptor\n"); *rsize = sizeof(df_rdesc_fixed); return df_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: if (*rsize == MOMO_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Force (Red) report descriptor\n"); *rsize = sizeof(momo_rdesc_fixed); return momo_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: if (*rsize == MOMO2_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Momo Racing Force (Black) report descriptor\n"); *rsize = sizeof(momo2_rdesc_fixed); return momo2_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: if (*rsize == FV_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Formula Vibration report descriptor\n"); *rsize = sizeof(fv_rdesc_fixed); return fv_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: if (*rsize == DFP_RDESC_ORIG_SIZE) { hid_info(hdev, "fixing up Logitech Driving Force Pro report descriptor\n"); *rsize = sizeof(dfp_rdesc_fixed); return dfp_rdesc_fixed; } break; case USB_DEVICE_ID_LOGITECH_WII_WHEEL: if (*rsize >= 101 && rdesc[41] == 0x95 && rdesc[42] == 0x0B && rdesc[47] == 0x05 && rdesc[48] == 0x09) { hid_info(hdev, "fixing up Logitech Speed Force Wireless report descriptor\n"); rdesc[41] = 0x05; rdesc[42] = 0x09; rdesc[47] = 0x95; rdesc[48] = 0x0B; } break; } return rdesc; } #define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int lg_ultrax_remote_mapping(struct hid_input *hi, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) return 0; set_bit(EV_REP, hi->input->evbit); switch (usage->hid & HID_USAGE) { /* Reported on Logitech Ultra X Media Remote */ case 0x004: lg_map_key_clear(KEY_AGAIN); break; case 0x00d: lg_map_key_clear(KEY_HOME); break; case 0x024: lg_map_key_clear(KEY_SHUFFLE); break; case 0x025: lg_map_key_clear(KEY_TV); break; case 0x026: lg_map_key_clear(KEY_MENU); break; case 0x031: lg_map_key_clear(KEY_AUDIO); break; case 0x032: lg_map_key_clear(KEY_TEXT); break; case 0x033: lg_map_key_clear(KEY_LAST); break; case 0x047: 
lg_map_key_clear(KEY_MP3); break; case 0x048: lg_map_key_clear(KEY_DVD); break; case 0x049: lg_map_key_clear(KEY_MEDIA); break; case 0x04a: lg_map_key_clear(KEY_VIDEO); break; case 0x04b: lg_map_key_clear(KEY_ANGLE); break; case 0x04c: lg_map_key_clear(KEY_LANGUAGE); break; case 0x04d: lg_map_key_clear(KEY_SUBTITLE); break; case 0x051: lg_map_key_clear(KEY_RED); break; case 0x052: lg_map_key_clear(KEY_CLOSE); break; default: return 0; } return 1; } static int lg_wireless_mapping(struct hid_input *hi, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case 0x1001: lg_map_key_clear(KEY_MESSENGER); break; case 0x1003: lg_map_key_clear(KEY_SOUND); break; case 0x1004: lg_map_key_clear(KEY_VIDEO); break; case 0x1005: lg_map_key_clear(KEY_AUDIO); break; case 0x100a: lg_map_key_clear(KEY_DOCUMENTS); break; /* The following two entries are Playlist 1 and 2 on the MX3200 */ case 0x100f: lg_map_key_clear(KEY_FN_1); break; case 0x1010: lg_map_key_clear(KEY_FN_2); break; case 0x1011: lg_map_key_clear(KEY_PREVIOUSSONG); break; case 0x1012: lg_map_key_clear(KEY_NEXTSONG); break; case 0x1013: lg_map_key_clear(KEY_CAMERA); break; case 0x1014: lg_map_key_clear(KEY_MESSENGER); break; case 0x1015: lg_map_key_clear(KEY_RECORD); break; case 0x1016: lg_map_key_clear(KEY_PLAYER); break; case 0x1017: lg_map_key_clear(KEY_EJECTCD); break; case 0x1018: lg_map_key_clear(KEY_MEDIA); break; case 0x1019: lg_map_key_clear(KEY_PROG1); break; case 0x101a: lg_map_key_clear(KEY_PROG2); break; case 0x101b: lg_map_key_clear(KEY_PROG3); break; case 0x101c: lg_map_key_clear(KEY_CYCLEWINDOWS); break; case 0x101f: lg_map_key_clear(KEY_ZOOMIN); break; case 0x1020: lg_map_key_clear(KEY_ZOOMOUT); break; case 0x1021: lg_map_key_clear(KEY_ZOOMRESET); break; case 0x1023: lg_map_key_clear(KEY_CLOSE); break; case 0x1027: lg_map_key_clear(KEY_MENU); break; /* this one is marked as 'Rotate' */ case 0x1028: lg_map_key_clear(KEY_ANGLE); break; case 0x1029: lg_map_key_clear(KEY_SHUFFLE); break; case 0x102a: lg_map_key_clear(KEY_BACK); break; case 0x102b: lg_map_key_clear(KEY_CYCLEWINDOWS); break; case 0x102d: lg_map_key_clear(KEY_WWW); break; /* The following two are 'Start/answer call' and 'End/reject call' on the MX3200 */ case 0x1031: lg_map_key_clear(KEY_OK); break; case 0x1032: lg_map_key_clear(KEY_CANCEL); break; case 0x1041: lg_map_key_clear(KEY_BATTERY); break; case 0x1042: lg_map_key_clear(KEY_WORDPROCESSOR); break; case 0x1043: lg_map_key_clear(KEY_SPREADSHEET); break; case 0x1044: lg_map_key_clear(KEY_PRESENTATION); break; case 0x1045: lg_map_key_clear(KEY_UNDO); break; case 0x1046: lg_map_key_clear(KEY_REDO); break; case 0x1047: lg_map_key_clear(KEY_PRINT); break; case 0x1048: lg_map_key_clear(KEY_SAVE); break; case 0x1049: lg_map_key_clear(KEY_PROG1); break; case 0x104a: lg_map_key_clear(KEY_PROG2); break; case 0x104b: lg_map_key_clear(KEY_PROG3); break; case 0x104c: lg_map_key_clear(KEY_PROG4); break; default: return 0; } return 1; } static int lg_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { /* extended mapping for certain Logitech hardware (Logitech cordless desktop LX500) */ static const u8 e_keymap[] = { 0,216, 0,213,175,156, 0, 0, 0, 0, 144, 0, 0, 0, 0, 0, 0, 0, 0,212, 174,167,152,161,112, 0, 0, 0,154, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0,183,184,185,186,187, 188,189,190,191,192,193,194, 0, 0, 0 }; struct lg_drv_data *drv_data = hid_get_drvdata(hdev); unsigned int hid = usage->hid; if (hdev->product == USB_DEVICE_ID_LOGITECH_RECEIVER && lg_ultrax_remote_mapping(hi, usage, bit, max)) return 1; if ((drv_data->quirks & LG_WIRELESS) && lg_wireless_mapping(hi, usage, bit, max)) return 1; if ((hid & HID_USAGE_PAGE) != HID_UP_BUTTON) return 0; hid &= HID_USAGE; /* Special handling for Logitech Cordless Desktop */ if (field->application == HID_GD_MOUSE) { if ((drv_data->quirks & LG_IGNORE_DOUBLED_WHEEL) && (hid == 7 || hid == 8)) return -1; } else { if ((drv_data->quirks & LG_EXPANDED_KEYMAP) && hid < ARRAY_SIZE(e_keymap) && e_keymap[hid] != 0) { hid_map_usage(hi, usage, bit, max, EV_KEY, e_keymap[hid]); return 1; } } return 0; } static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_BAD_RELATIVE_KEYS) && usage->type == EV_KEY && (field->flags & HID_MAIN_ITEM_RELATIVE)) field->flags &= ~HID_MAIN_ITEM_RELATIVE; if ((drv_data->quirks & LG_DUPLICATE_USAGES) && (usage->type == EV_KEY || usage->type == EV_REL || usage->type == EV_ABS)) clear_bit(usage->code, *bit); /* Ensure that Logitech wheels are not given a default fuzz/flat value */ if (usage->type == EV_ABS && (usage->code == ABS_X || usage->code == ABS_Y || usage->code == ABS_Z || usage->code == ABS_RZ)) { switch (hdev->product) { case USB_DEVICE_ID_LOGITECH_G29_WHEEL: case USB_DEVICE_ID_LOGITECH_WINGMAN_FG: case USB_DEVICE_ID_LOGITECH_WINGMAN_FFG: case USB_DEVICE_ID_LOGITECH_WHEEL: case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL: case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: case USB_DEVICE_ID_LOGITECH_G25_WHEEL: case USB_DEVICE_ID_LOGITECH_DFGT_WHEEL: case USB_DEVICE_ID_LOGITECH_G27_WHEEL: case USB_DEVICE_ID_LOGITECH_WII_WHEEL: case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2: case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL: field->application = HID_GD_MULTIAXIS; break; default: break; } } return 0; } static int lg_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_INVERT_HWHEEL) && usage->code == REL_HWHEEL) { input_event(field->hidinput->input, usage->type, usage->code, -value); return 1; } if (drv_data->quirks & LG_FF4) { return lg4ff_adjust_input_event(hdev, field, usage, value, drv_data); } return 0; } static int lg_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *rd, int size) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) return lg4ff_raw_event(hdev, report, rd, size, drv_data); return 0; } static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct usb_interface *iface; __u8 iface_num; unsigned int connect_mask = HID_CONNECT_DEFAULT; struct lg_drv_data *drv_data; int ret; if (!hid_is_usb(hdev)) return -EINVAL; iface = to_usb_interface(hdev->dev.parent); iface_num = iface->cur_altsetting->desc.bInterfaceNumber; /* G29 only work with the 1st interface */ if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && (iface_num != 0)) { dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); return -ENODEV; } drv_data = kzalloc(sizeof(struct lg_drv_data), GFP_KERNEL); if (!drv_data) { hid_err(hdev, "Insufficient memory, cannot allocate driver data\n"); return -ENOMEM; } drv_data->quirks = id->driver_data; 
hid_set_drvdata(hdev, (void *)drv_data); if (drv_data->quirks & LG_NOGET) hdev->quirks |= HID_QUIRK_NOGET; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } if (drv_data->quirks & (LG_FF | LG_FF2 | LG_FF3 | LG_FF4)) connect_mask &= ~HID_CONNECT_FF; ret = hid_hw_start(hdev, connect_mask); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } /* Setup wireless link with Logitech Wii wheel */ if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { static const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_stop; } ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret >= 0) { /* insert a little delay of 10 jiffies ~ 40ms */ wait_queue_head_t wait; init_waitqueue_head (&wait); wait_event_interruptible_timeout(wait, 0, msecs_to_jiffies(40)); /* Select random Address */ buf[1] = 0xB2; get_random_bytes(&buf[2], 2); ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); } kfree(buf); } if (drv_data->quirks & LG_FF) ret = lgff_init(hdev); else if (drv_data->quirks & LG_FF2) ret = lg2ff_init(hdev); else if (drv_data->quirks & LG_FF3) ret = lg3ff_init(hdev); else if (drv_data->quirks & LG_FF4) ret = lg4ff_init(hdev); if (ret) goto err_stop; return 0; err_stop: hid_hw_stop(hdev); err_free: kfree(drv_data); return ret; } static void lg_remove(struct hid_device *hdev) { struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) lg4ff_deinit(hdev); hid_hw_stop(hdev); kfree(drv_data); } static const struct hid_device_id lg_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER), .driver_data = LG_RDESC | LG_WIRELESS }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER), .driver_data = LG_BAD_RELATIVE_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP), .driver_data = LG_DUPLICATE_USAGES }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD), .driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500), .driver_data = LG_IGNORE_DOUBLED_WHEEL | LG_EXPANDED_KEYMAP }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D), .driver_data = LG_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION), .driver_data = LG_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD), .driver_data = LG_FF2 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL), .driver_data = LG_FF2 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FG), .driver_data = LG_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), .driver_data = LG_NOGET | LG_FF2 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), .driver_data = LG_FF3 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), .driver_data = LG_RDESC_REL_ABS }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER), .driver_data = LG_RDESC_REL_ABS }, { } }; MODULE_DEVICE_TABLE(hid, lg_devices); static struct hid_driver lg_driver = { .name = "logitech", .id_table = lg_devices, .report_fixup = lg_report_fixup, .input_mapping = lg_input_mapping, .input_mapped = lg_input_mapped, .event = lg_event, .raw_event = lg_raw_event, .probe = lg_probe, .remove = lg_remove, }; module_hid_driver(lg_driver); #ifdef CONFIG_LOGIWHEELS_FF int lg4ff_no_autoswitch = 0; module_param_named(lg4ff_no_autoswitch, lg4ff_no_autoswitch, int, S_IRUGO); MODULE_PARM_DESC(lg4ff_no_autoswitch, "Do not switch multimode wheels to their native mode automatically"); #endif MODULE_DESCRIPTION("HID driver for some logitech \"special\" devices"); MODULE_LICENSE("GPL");
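As a side note on the LG_RDESC quirk above: a 0x26 report-descriptor item is a Logical Maximum with a two-byte little-endian payload, so 0x26 0x8c 0x02 encodes 0x028c, and overwriting the payload bytes with 0x4d 0x10 (as lg_report_fixup() does at offsets 84/85 and 89/90) raises it to 0x104d. A small standalone sketch of that byte patch, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Patch the two-byte little-endian payload of a 0x26 (Logical Maximum) item. */
static void patch_logical_max(uint8_t *item, uint16_t new_max)
{
	item[1] = new_max & 0xff;	/* low byte  */
	item[2] = new_max >> 8;		/* high byte */
}

int main(void)
{
	uint8_t item[] = { 0x26, 0x8c, 0x02 };	/* Logical Maximum (0x028c) */

	patch_logical_max(item, 0x104d);
	printf("Logical Maximum is now 0x%02x%02x\n", item[2], item[1]);
	return 0;
}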
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2014-2015, Qualcomm Atheros, Inc. */ #ifndef AES_GCM_H #define AES_GCM_H #include "aead_api.h" #define GCM_AAD_LEN 32 static inline int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, u8 *data, size_t data_len, u8 *mic) { return aead_encrypt(tfm, j_0, aad + 2, be16_to_cpup((__be16 *)aad), data, data_len, mic); } static inline int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad, u8 *data, size_t data_len, u8 *mic) { return aead_decrypt(tfm, j_0, aad + 2, be16_to_cpup((__be16 *)aad), data, data_len, mic); } static inline struct crypto_aead * ieee80211_aes_gcm_key_setup_encrypt(const u8 key[], size_t key_len) { return aead_key_setup_encrypt("gcm(aes)", key, key_len, IEEE80211_GCMP_MIC_LEN); } static inline void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm) { return aead_key_free(tfm); } #endif /* AES_GCM_H */
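The inline wrappers above expect the caller's aad buffer to start with a two-byte big-endian length, since they pass aad + 2 as the AAD proper and be16_to_cpup((__be16 *)aad) as its length. A minimal sketch of that layout follows; fill_gcm_aad() is a hypothetical helper, not part of mac80211, and it assumes the destination buffer holds at least 2 + aad_len bytes (GCM_AAD_LEN covers the usual case).

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: prefix the AAD with its length in big-endian form. */
static void fill_gcm_aad(u8 *buf, const u8 *aad, u16 aad_len)
{
	buf[0] = aad_len >> 8;		/* big-endian length prefix */
	buf[1] = aad_len & 0xff;
	memcpy(buf + 2, aad, aad_len);	/* AAD bytes start at buf + 2 */
}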
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 */ #include <linux/fs.h> #include <linux/mpage.h> #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/quotaops.h> #include <linux/uio.h> #include <linux/writeback.h> #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_filsys.h" #include "jfs_imap.h" #include "jfs_extent.h" #include "jfs_unicode.h" #include "jfs_debug.h" #include "jfs_dmap.h" struct inode *jfs_iget(struct super_block *sb, unsigned long ino) { struct inode *inode; int ret; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; ret = diRead(inode); if (ret < 0) { iget_failed(inode); return ERR_PTR(ret); } if (S_ISREG(inode->i_mode)) { inode->i_op = &jfs_file_inode_operations; inode->i_fop = &jfs_file_operations; inode->i_mapping->a_ops = &jfs_aops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &jfs_dir_inode_operations; inode->i_fop = &jfs_dir_operations; } else if (S_ISLNK(inode->i_mode)) { if (inode->i_size >= IDATASIZE) { inode->i_op = &page_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &jfs_aops; } else { inode->i_op = &jfs_fast_symlink_inode_operations; inode->i_link = JFS_IP(inode)->i_inline; /* * The inline data should be null-terminated, but * don't let on-disk corruption crash the kernel */ inode->i_link[inode->i_size] = '\0'; } } else { inode->i_op = &jfs_file_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); } unlock_new_inode(inode); return inode; } /* * Workhorse of both fsync & write_inode */ int jfs_commit_inode(struct inode *inode, int wait) { int rc = 0; 
tid_t tid; static int noisy = 5; jfs_info("In jfs_commit_inode, inode = 0x%p", inode); /* * Don't commit if inode has been committed since last being * marked dirty, or if it has been deleted. */ if (inode->i_nlink == 0 || !test_cflag(COMMIT_Dirty, inode)) return 0; if (isReadOnly(inode)) { /* kernel allows writes to devices on read-only * partitions and may think inode is dirty */ if (!special_file(inode->i_mode) && noisy) { jfs_err("jfs_commit_inode(0x%p) called on read-only volume", inode); jfs_err("Is remount racy?"); noisy--; } return 0; } tid = txBegin(inode->i_sb, COMMIT_INODE); mutex_lock(&JFS_IP(inode)->commit_mutex); /* * Retest inode state after taking commit_mutex */ if (inode->i_nlink && test_cflag(COMMIT_Dirty, inode)) rc = txCommit(tid, 1, &inode, wait ? COMMIT_SYNC : 0); txEnd(tid); mutex_unlock(&JFS_IP(inode)->commit_mutex); return rc; } int jfs_write_inode(struct inode *inode, struct writeback_control *wbc) { int wait = wbc->sync_mode == WB_SYNC_ALL; if (inode->i_nlink == 0) return 0; /* * If COMMIT_DIRTY is not set, the inode isn't really dirty. * It has been committed since the last change, but was still * on the dirty inode list. */ if (!test_cflag(COMMIT_Dirty, inode)) { /* Make sure committed changes hit the disk */ jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait); return 0; } if (jfs_commit_inode(inode, wait)) { jfs_err("jfs_write_inode: jfs_commit_inode failed!"); return -EIO; } else return 0; } void jfs_evict_inode(struct inode *inode) { struct jfs_inode_info *ji = JFS_IP(inode); jfs_info("In jfs_evict_inode, inode = 0x%p", inode); if (!inode->i_nlink && !is_bad_inode(inode)) { dquot_initialize(inode); if (JFS_IP(inode)->fileset == FILESYSTEM_I) { struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap; truncate_inode_pages_final(&inode->i_data); if (test_cflag(COMMIT_Freewmap, inode)) jfs_free_zero_link(inode); if (ipimap && JFS_IP(ipimap)->i_imap) diFree(inode); /* * Free the inode from the quota allocation. 
*/ dquot_free_inode(inode); } } else { truncate_inode_pages_final(&inode->i_data); } clear_inode(inode); dquot_drop(inode); BUG_ON(!list_empty(&ji->anon_inode_list)); spin_lock_irq(&ji->ag_lock); if (ji->active_ag != -1) { struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap; atomic_dec(&bmap->db_active[ji->active_ag]); ji->active_ag = -1; } spin_unlock_irq(&ji->ag_lock); } void jfs_dirty_inode(struct inode *inode, int flags) { static int noisy = 5; if (isReadOnly(inode)) { if (!special_file(inode->i_mode) && noisy) { /* kernel allows writes to devices on read-only * partitions and may try to mark inode dirty */ jfs_err("jfs_dirty_inode called on read-only volume"); jfs_err("Is remount racy?"); noisy--; } return; } set_cflag(COMMIT_Dirty, inode); } int jfs_get_block(struct inode *ip, sector_t lblock, struct buffer_head *bh_result, int create) { s64 lblock64 = lblock; int rc = 0; xad_t xad; s64 xaddr; int xflag; s32 xlen = bh_result->b_size >> ip->i_blkbits; /* * Take appropriate lock on inode */ if (create) IWRITE_LOCK(ip, RDWRLOCK_NORMAL); else IREAD_LOCK(ip, RDWRLOCK_NORMAL); if (((lblock64 << ip->i_sb->s_blocksize_bits) < ip->i_size) && (!xtLookup(ip, lblock64, xlen, &xflag, &xaddr, &xlen, 0)) && xaddr) { if (xflag & XAD_NOTRECORDED) { if (!create) /* * Allocated but not recorded, read treats * this as a hole */ goto unlock; XADoffset(&xad, lblock64); XADlength(&xad, xlen); XADaddress(&xad, xaddr); rc = extRecord(ip, &xad); if (rc) goto unlock; set_buffer_new(bh_result); } map_bh(bh_result, ip->i_sb, xaddr); bh_result->b_size = xlen << ip->i_blkbits; goto unlock; } if (!create) goto unlock; /* * Allocate a new block */ if ((rc = extHint(ip, lblock64 << ip->i_sb->s_blocksize_bits, &xad))) goto unlock; rc = extAlloc(ip, xlen, lblock64, &xad, false); if (rc) goto unlock; set_buffer_new(bh_result); map_bh(bh_result, ip->i_sb, addressXAD(&xad)); bh_result->b_size = lengthXAD(&xad) << ip->i_blkbits; unlock: /* * Release lock on inode */ if (create) IWRITE_UNLOCK(ip); else IREAD_UNLOCK(ip); return rc; } static int jfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, jfs_get_block); } static int jfs_read_folio(struct file *file, struct folio *folio) { return mpage_read_folio(folio, jfs_get_block); } static void jfs_readahead(struct readahead_control *rac) { mpage_readahead(rac, jfs_get_block); } static void jfs_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, inode->i_size); jfs_truncate(inode); } } static int jfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, foliop, jfs_get_block); if (unlikely(ret)) jfs_write_failed(mapping, pos + len); return ret; } static int jfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { int ret; ret = generic_write_end(file, mapping, pos, len, copied, folio, fsdata); if (ret < len) jfs_write_failed(mapping, pos + len); return ret; } static sector_t jfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, jfs_get_block); } static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = file->f_mapping->host; size_t count = 
iov_iter_count(iter); ssize_t ret; ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. */ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); loff_t end = iocb->ki_pos + count; if (end > isize) jfs_write_failed(mapping, end); } return ret; } const struct address_space_operations jfs_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = jfs_read_folio, .readahead = jfs_readahead, .writepages = jfs_writepages, .write_begin = jfs_write_begin, .write_end = jfs_write_end, .bmap = jfs_bmap, .direct_IO = jfs_direct_IO, .migrate_folio = buffer_migrate_folio, }; /* * Guts of jfs_truncate. Called with locks already held. Can be called * with directory for truncating directory index table. */ void jfs_truncate_nolock(struct inode *ip, loff_t length) { loff_t newsize; tid_t tid; ASSERT(length >= 0); if (test_cflag(COMMIT_Nolink, ip)) { xtTruncate(0, ip, length, COMMIT_WMAP); return; } do { tid = txBegin(ip->i_sb, 0); /* * The commit_mutex cannot be taken before txBegin. * txBegin may block and there is a chance the inode * could be marked dirty and need to be committed * before txBegin unblocks */ mutex_lock(&JFS_IP(ip)->commit_mutex); newsize = xtTruncate(tid, ip, length, COMMIT_TRUNCATE | COMMIT_PWMAP); if (newsize < 0) { txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); break; } inode_set_mtime_to_ts(ip, inode_set_ctime_current(ip)); mark_inode_dirty(ip); txCommit(tid, 1, &ip, 0); txEnd(tid); mutex_unlock(&JFS_IP(ip)->commit_mutex); } while (newsize > length); /* Truncate isn't always atomic */ } void jfs_truncate(struct inode *ip) { jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size); block_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block); IWRITE_LOCK(ip, RDWRLOCK_NORMAL); jfs_truncate_nolock(ip, ip->i_size); IWRITE_UNLOCK(ip); }
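jfs_get_block() above follows the generic get_block_t contract used by the mpage and buffer-head helpers wired into jfs_aops: map bh_result with map_bh(), trim b_size to the extent actually mapped, flag newly allocated blocks with set_buffer_new(), and leave holes unmapped on reads. A minimal sketch of that contract for a hypothetical filesystem whose file blocks sit linearly after a fixed data_start block; this is not JFS code, only an illustration of the interface.

#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/fs.h>

static int linear_get_block(struct inode *inode, sector_t iblock,
			    struct buffer_head *bh_result, int create)
{
	sector_t data_start = 1024;	/* hypothetical on-disk layout */
	sector_t nr_blocks = inode->i_size >> inode->i_blkbits;

	if (iblock >= nr_blocks)
		return create ? -ENOSPC : 0;	/* read of a hole: leave bh unmapped */

	/* Tell the caller which disk block backs this file block. */
	map_bh(bh_result, inode->i_sb, data_start + iblock);
	return 0;
}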
860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 // SPDX-License-Identifier: GPL-2.0 /* * device_cgroup.c - device cgroup subsystem * * Copyright 2007 IBM Corp */ #include <linux/bpf-cgroup.h> #include <linux/device_cgroup.h> #include <linux/cgroup.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/uaccess.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/mutex.h> #ifdef CONFIG_CGROUP_DEVICE static DEFINE_MUTEX(devcgroup_mutex); enum devcg_behavior { DEVCG_DEFAULT_NONE, DEVCG_DEFAULT_ALLOW, DEVCG_DEFAULT_DENY, }; /* * exception list locking rules: * hold devcgroup_mutex for update/read. * hold rcu_read_lock() for read. */ struct dev_exception_item { u32 major, minor; short type; short access; struct list_head list; struct rcu_head rcu; }; struct dev_cgroup { struct cgroup_subsys_state css; struct list_head exceptions; enum devcg_behavior behavior; }; static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s) { return s ? container_of(s, struct dev_cgroup, css) : NULL; } static inline struct dev_cgroup *task_devcgroup(struct task_struct *task) { return css_to_devcgroup(task_css(task, devices_cgrp_id)); } /* * called under devcgroup_mutex */ static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig) { struct dev_exception_item *ex, *tmp, *new; lockdep_assert_held(&devcgroup_mutex); list_for_each_entry(ex, orig, list) { new = kmemdup(ex, sizeof(*ex), GFP_KERNEL); if (!new) goto free_and_exit; list_add_tail(&new->list, dest); } return 0; free_and_exit: list_for_each_entry_safe(ex, tmp, dest, list) { list_del(&ex->list); kfree(ex); } return -ENOMEM; } static void dev_exceptions_move(struct list_head *dest, struct list_head *orig) { struct dev_exception_item *ex, *tmp; lockdep_assert_held(&devcgroup_mutex); list_for_each_entry_safe(ex, tmp, orig, list) { list_move_tail(&ex->list, dest); } } /* * called under devcgroup_mutex */ static int dev_exception_add(struct dev_cgroup *dev_cgroup, struct dev_exception_item *ex) { struct dev_exception_item *excopy, *walk; lockdep_assert_held(&devcgroup_mutex); excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL); if (!excopy) return -ENOMEM; list_for_each_entry(walk, &dev_cgroup->exceptions, list) { if (walk->type != ex->type) continue; if (walk->major != ex->major) continue; if (walk->minor != ex->minor) continue; walk->access |= ex->access; kfree(excopy); excopy = NULL; } if (excopy != NULL) list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions); return 0; } /* * called under devcgroup_mutex */ static void dev_exception_rm(struct dev_cgroup *dev_cgroup, struct dev_exception_item *ex) { struct dev_exception_item *walk, *tmp; lockdep_assert_held(&devcgroup_mutex); list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) { if (walk->type != ex->type) continue; if (walk->major != ex->major) continue; if (walk->minor != ex->minor) continue; walk->access &= ~ex->access; if (!walk->access) { list_del_rcu(&walk->list); kfree_rcu(walk, rcu); } } } static void __dev_exception_clean(struct dev_cgroup *dev_cgroup) { struct dev_exception_item *ex, *tmp; list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) { list_del_rcu(&ex->list); kfree_rcu(ex, rcu); } } /** * dev_exception_clean - frees all entries of the exception list * @dev_cgroup: dev_cgroup with the exception list to be cleaned * * called under devcgroup_mutex */ static void dev_exception_clean(struct dev_cgroup *dev_cgroup) { lockdep_assert_held(&devcgroup_mutex); 
__dev_exception_clean(dev_cgroup); } static inline bool is_devcg_online(const struct dev_cgroup *devcg) { return (devcg->behavior != DEVCG_DEFAULT_NONE); } /** * devcgroup_online - initializes devcgroup's behavior and exceptions based on * parent's * @css: css getting online * returns 0 in case of success, error code otherwise */ static int devcgroup_online(struct cgroup_subsys_state *css) { struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent); int ret = 0; mutex_lock(&devcgroup_mutex); if (parent_dev_cgroup == NULL) dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW; else { ret = dev_exceptions_copy(&dev_cgroup->exceptions, &parent_dev_cgroup->exceptions); if (!ret) dev_cgroup->behavior = parent_dev_cgroup->behavior; } mutex_unlock(&devcgroup_mutex); return ret; } static void devcgroup_offline(struct cgroup_subsys_state *css) { struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); mutex_lock(&devcgroup_mutex); dev_cgroup->behavior = DEVCG_DEFAULT_NONE; mutex_unlock(&devcgroup_mutex); } /* * called from kernel/cgroup/cgroup.c with cgroup_lock() held. */ static struct cgroup_subsys_state * devcgroup_css_alloc(struct cgroup_subsys_state *parent_css) { struct dev_cgroup *dev_cgroup; dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL); if (!dev_cgroup) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&dev_cgroup->exceptions); dev_cgroup->behavior = DEVCG_DEFAULT_NONE; return &dev_cgroup->css; } static void devcgroup_css_free(struct cgroup_subsys_state *css) { struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); __dev_exception_clean(dev_cgroup); kfree(dev_cgroup); } #define DEVCG_ALLOW 1 #define DEVCG_DENY 2 #define DEVCG_LIST 3 #define MAJMINLEN 13 #define ACCLEN 4 static void set_access(char *acc, short access) { int idx = 0; memset(acc, 0, ACCLEN); if (access & DEVCG_ACC_READ) acc[idx++] = 'r'; if (access & DEVCG_ACC_WRITE) acc[idx++] = 'w'; if (access & DEVCG_ACC_MKNOD) acc[idx++] = 'm'; } static char type_to_char(short type) { if (type == DEVCG_DEV_ALL) return 'a'; if (type == DEVCG_DEV_CHAR) return 'c'; if (type == DEVCG_DEV_BLOCK) return 'b'; return 'X'; } static void set_majmin(char *str, unsigned m) { if (m == ~0) strcpy(str, "*"); else sprintf(str, "%u", m); } static int devcgroup_seq_show(struct seq_file *m, void *v) { struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m)); struct dev_exception_item *ex; char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN]; rcu_read_lock(); /* * To preserve the compatibility: * - Only show the "all devices" when the default policy is to allow * - List the exceptions in case the default policy is to deny * This way, the file remains as a "whitelist of devices" */ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { set_access(acc, DEVCG_ACC_MASK); set_majmin(maj, ~0); set_majmin(min, ~0); seq_printf(m, "%c %s:%s %s\n", type_to_char(DEVCG_DEV_ALL), maj, min, acc); } else { list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) { set_access(acc, ex->access); set_majmin(maj, ex->major); set_majmin(min, ex->minor); seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type), maj, min, acc); } } rcu_read_unlock(); return 0; } /** * match_exception - iterates the exception list trying to find a complete match * @exceptions: list of exceptions * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR) * @major: device file major number, ~0 to match all * @minor: device file minor number, ~0 to match all * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD) * * It is considered a 
complete match if an exception is found that will * contain the entire range of provided parameters. * * Return: true in case it matches an exception completely */ static bool match_exception(struct list_head *exceptions, short type, u32 major, u32 minor, short access) { struct dev_exception_item *ex; list_for_each_entry_rcu(ex, exceptions, list) { if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK)) continue; if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR)) continue; if (ex->major != ~0 && ex->major != major) continue; if (ex->minor != ~0 && ex->minor != minor) continue; /* provided access cannot have more than the exception rule */ if (access & (~ex->access)) continue; return true; } return false; } /** * match_exception_partial - iterates the exception list trying to find a partial match * @exceptions: list of exceptions * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR) * @major: device file major number, ~0 to match all * @minor: device file minor number, ~0 to match all * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD) * * It is considered a partial match if an exception's range is found to * contain *any* of the devices specified by provided parameters. This is * used to make sure no extra access is being granted that is forbidden by * any of the exception list. * * Return: true in case the provided range mat matches an exception completely */ static bool match_exception_partial(struct list_head *exceptions, short type, u32 major, u32 minor, short access) { struct dev_exception_item *ex; list_for_each_entry_rcu(ex, exceptions, list, lockdep_is_held(&devcgroup_mutex)) { if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK)) continue; if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR)) continue; /* * We must be sure that both the exception and the provided * range aren't masking all devices */ if (ex->major != ~0 && major != ~0 && ex->major != major) continue; if (ex->minor != ~0 && minor != ~0 && ex->minor != minor) continue; /* * In order to make sure the provided range isn't matching * an exception, all its access bits shouldn't match the * exception's access bits */ if (!(access & ex->access)) continue; return true; } return false; } /** * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions * @dev_cgroup: dev cgroup to be tested against * @refex: new exception * @behavior: behavior of the exception's dev_cgroup * * This is used to make sure a child cgroup won't have more privileges * than its parent */ static bool verify_new_ex(struct dev_cgroup *dev_cgroup, struct dev_exception_item *refex, enum devcg_behavior behavior) { bool match = false; RCU_LOCKDEP_WARN(!rcu_read_lock_held() && !lockdep_is_held(&devcgroup_mutex), "device_cgroup:verify_new_ex called without proper synchronization"); if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { if (behavior == DEVCG_DEFAULT_ALLOW) { /* * new exception in the child doesn't matter, only * adding extra restrictions */ return true; } else { /* * new exception in the child will add more devices * that can be accessed, so it can't match any of * parent's exceptions, even slightly */ match = match_exception_partial(&dev_cgroup->exceptions, refex->type, refex->major, refex->minor, refex->access); if (match) return false; return true; } } else { /* * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore * the new exception will add access to more devices and must * be contained completely in an parent's exception to be * allowed */ 
match = match_exception(&dev_cgroup->exceptions, refex->type, refex->major, refex->minor, refex->access); if (match) /* parent has an exception that matches the proposed */ return true; else return false; } return false; } /* * parent_has_perm: * when adding a new allow rule to a device exception list, the rule * must be allowed in the parent device */ static int parent_has_perm(struct dev_cgroup *childcg, struct dev_exception_item *ex) { struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent); if (!parent) return 1; return verify_new_ex(parent, ex, childcg->behavior); } /** * parent_allows_removal - verify if it's ok to remove an exception * @childcg: child cgroup from where the exception will be removed * @ex: exception being removed * * When removing an exception in cgroups with default ALLOW policy, it must * be checked if removing it will give the child cgroup more access than the * parent. * * Return: true if it's ok to remove exception, false otherwise */ static bool parent_allows_removal(struct dev_cgroup *childcg, struct dev_exception_item *ex) { struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent); if (!parent) return true; /* It's always allowed to remove access to devices */ if (childcg->behavior == DEVCG_DEFAULT_DENY) return true; /* * Make sure you're not removing part or a whole exception existing in * the parent cgroup */ return !match_exception_partial(&parent->exceptions, ex->type, ex->major, ex->minor, ex->access); } /** * may_allow_all - checks if it's possible to change the behavior to * allow based on parent's rules. * @parent: device cgroup's parent * returns: != 0 in case it's allowed, 0 otherwise */ static inline int may_allow_all(struct dev_cgroup *parent) { if (!parent) return 1; return parent->behavior == DEVCG_DEFAULT_ALLOW; } /** * revalidate_active_exceptions - walks through the active exception list and * revalidates the exceptions based on parent's * behavior and exceptions. The exceptions that * are no longer valid will be removed. * Called with devcgroup_mutex held. * @devcg: cgroup which exceptions will be checked * * This is one of the three key functions for hierarchy implementation. * This function is responsible for re-evaluating all the cgroup's active * exceptions due to a parent's exception change. * Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details. */ static void revalidate_active_exceptions(struct dev_cgroup *devcg) { struct dev_exception_item *ex; struct list_head *this, *tmp; list_for_each_safe(this, tmp, &devcg->exceptions) { ex = container_of(this, struct dev_exception_item, list); if (!parent_has_perm(devcg, ex)) dev_exception_rm(devcg, ex); } } /** * propagate_exception - propagates a new exception to the children * @devcg_root: device cgroup that added a new exception * @ex: new exception to be propagated * * returns: 0 in case of success, != 0 in case of error */ static int propagate_exception(struct dev_cgroup *devcg_root, struct dev_exception_item *ex) { struct cgroup_subsys_state *pos; int rc = 0; rcu_read_lock(); css_for_each_descendant_pre(pos, &devcg_root->css) { struct dev_cgroup *devcg = css_to_devcgroup(pos); /* * Because devcgroup_mutex is held, no devcg will become * online or offline during the tree walk (see on/offline * methods), and online ones are safe to access outside RCU * read lock without bumping refcnt. 
*/ if (pos == &devcg_root->css || !is_devcg_online(devcg)) continue; rcu_read_unlock(); /* * in case both root's behavior and devcg is allow, a new * restriction means adding to the exception list */ if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW && devcg->behavior == DEVCG_DEFAULT_ALLOW) { rc = dev_exception_add(devcg, ex); if (rc) return rc; } else { /* * in the other possible cases: * root's behavior: allow, devcg's: deny * root's behavior: deny, devcg's: deny * the exception will be removed */ dev_exception_rm(devcg, ex); } revalidate_active_exceptions(devcg); rcu_read_lock(); } rcu_read_unlock(); return rc; } /* * Modify the exception list using allow/deny rules. * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD * so we can give a container CAP_MKNOD to let it create devices but not * modify the exception list. * It seems likely we'll want to add a CAP_CONTAINER capability to allow * us to also grant CAP_SYS_ADMIN to containers without giving away the * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN * * Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting * new access is only allowed if you're in the top-level cgroup, or your * parent cgroup has the access you're asking for. */ static int devcgroup_update_access(struct dev_cgroup *devcgroup, int filetype, char *buffer) { const char *b; char temp[12]; /* 11 + 1 characters needed for a u32 */ int count, rc = 0; struct dev_exception_item ex; struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent); struct dev_cgroup tmp_devcgrp; if (!capable(CAP_SYS_ADMIN)) return -EPERM; memset(&ex, 0, sizeof(ex)); memset(&tmp_devcgrp, 0, sizeof(tmp_devcgrp)); b = buffer; switch (*b) { case 'a': switch (filetype) { case DEVCG_ALLOW: if (css_has_online_children(&devcgroup->css)) return -EINVAL; if (!may_allow_all(parent)) return -EPERM; if (!parent) { devcgroup->behavior = DEVCG_DEFAULT_ALLOW; dev_exception_clean(devcgroup); break; } INIT_LIST_HEAD(&tmp_devcgrp.exceptions); rc = dev_exceptions_copy(&tmp_devcgrp.exceptions, &devcgroup->exceptions); if (rc) return rc; dev_exception_clean(devcgroup); rc = dev_exceptions_copy(&devcgroup->exceptions, &parent->exceptions); if (rc) { dev_exceptions_move(&devcgroup->exceptions, &tmp_devcgrp.exceptions); return rc; } devcgroup->behavior = DEVCG_DEFAULT_ALLOW; dev_exception_clean(&tmp_devcgrp); break; case DEVCG_DENY: if (css_has_online_children(&devcgroup->css)) return -EINVAL; dev_exception_clean(devcgroup); devcgroup->behavior = DEVCG_DEFAULT_DENY; break; default: return -EINVAL; } return 0; case 'b': ex.type = DEVCG_DEV_BLOCK; break; case 'c': ex.type = DEVCG_DEV_CHAR; break; default: return -EINVAL; } b++; if (!isspace(*b)) return -EINVAL; b++; if (*b == '*') { ex.major = ~0; b++; } else if (isdigit(*b)) { memset(temp, 0, sizeof(temp)); for (count = 0; count < sizeof(temp) - 1; count++) { temp[count] = *b; b++; if (!isdigit(*b)) break; } rc = kstrtou32(temp, 10, &ex.major); if (rc) return -EINVAL; } else { return -EINVAL; } if (*b != ':') return -EINVAL; b++; /* read minor */ if (*b == '*') { ex.minor = ~0; b++; } else if (isdigit(*b)) { memset(temp, 0, sizeof(temp)); for (count = 0; count < sizeof(temp) - 1; count++) { temp[count] = *b; b++; if (!isdigit(*b)) break; } rc = kstrtou32(temp, 10, &ex.minor); if (rc) return -EINVAL; } else { return -EINVAL; } if (!isspace(*b)) return -EINVAL; for (b++, count = 0; count < 3; count++, b++) { switch (*b) { case 'r': ex.access |= DEVCG_ACC_READ; break; case 'w': ex.access |= 
DEVCG_ACC_WRITE; break; case 'm': ex.access |= DEVCG_ACC_MKNOD; break; case '\n': case '\0': count = 3; break; default: return -EINVAL; } } switch (filetype) { case DEVCG_ALLOW: /* * If the default policy is to allow by default, try to remove * an matching exception instead. And be silent about it: we * don't want to break compatibility */ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { /* Check if the parent allows removing it first */ if (!parent_allows_removal(devcgroup, &ex)) return -EPERM; dev_exception_rm(devcgroup, &ex); break; } if (!parent_has_perm(devcgroup, &ex)) return -EPERM; rc = dev_exception_add(devcgroup, &ex); break; case DEVCG_DENY: /* * If the default policy is to deny by default, try to remove * an matching exception instead. And be silent about it: we * don't want to break compatibility */ if (devcgroup->behavior == DEVCG_DEFAULT_DENY) dev_exception_rm(devcgroup, &ex); else rc = dev_exception_add(devcgroup, &ex); if (rc) break; /* we only propagate new restrictions */ rc = propagate_exception(devcgroup, &ex); break; default: rc = -EINVAL; } return rc; } static ssize_t devcgroup_access_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { int retval; mutex_lock(&devcgroup_mutex); retval = devcgroup_update_access(css_to_devcgroup(of_css(of)), of_cft(of)->private, strstrip(buf)); mutex_unlock(&devcgroup_mutex); return retval ?: nbytes; } static struct cftype dev_cgroup_files[] = { { .name = "allow", .write = devcgroup_access_write, .private = DEVCG_ALLOW, }, { .name = "deny", .write = devcgroup_access_write, .private = DEVCG_DENY, }, { .name = "list", .seq_show = devcgroup_seq_show, .private = DEVCG_LIST, }, { } /* terminate */ }; struct cgroup_subsys devices_cgrp_subsys = { .css_alloc = devcgroup_css_alloc, .css_free = devcgroup_css_free, .css_online = devcgroup_online, .css_offline = devcgroup_offline, .legacy_cftypes = dev_cgroup_files, }; /** * devcgroup_legacy_check_permission - checks if an inode operation is permitted * @type: device type * @major: device major number * @minor: device minor number * @access: combination of DEVCG_ACC_WRITE, DEVCG_ACC_READ and DEVCG_ACC_MKNOD * * returns 0 on success, -EPERM case the operation is not permitted */ static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor, short access) { struct dev_cgroup *dev_cgroup; bool rc; rcu_read_lock(); dev_cgroup = task_devcgroup(current); if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) /* Can't match any of the exceptions, even partially */ rc = !match_exception_partial(&dev_cgroup->exceptions, type, major, minor, access); else /* Need to match completely one exception to be allowed */ rc = match_exception(&dev_cgroup->exceptions, type, major, minor, access); rcu_read_unlock(); if (!rc) return -EPERM; return 0; } #endif /* CONFIG_CGROUP_DEVICE */ #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) int devcgroup_check_permission(short type, u32 major, u32 minor, short access) { int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access); if (rc) return rc; #ifdef CONFIG_CGROUP_DEVICE return devcgroup_legacy_check_permission(type, major, minor, access); #else /* CONFIG_CGROUP_DEVICE */ return 0; #endif /* CONFIG_CGROUP_DEVICE */ } EXPORT_SYMBOL(devcgroup_check_permission); #endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */
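/*
 * Illustrative userspace sketch (not part of device_cgroup.c): writing a
 * rule in the format parsed by devcgroup_update_access() above, i.e.
 * "<type> <major>:<minor> <access>", where type is 'a', 'b' or 'c' and the
 * access string is any combination of 'r', 'w' and 'm'. The cgroup v1
 * hierarchy path below ("/sys/fs/cgroup/devices/mygroup") is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Deny read/write access to /dev/null (char device 1:3) for this cgroup. */
	const char *rule = "c 1:3 rw";
	int fd = open("/sys/fs/cgroup/devices/mygroup/devices.deny", O_WRONLY);

	if (fd < 0) {
		perror("open devices.deny");
		return 1;
	}
	if (write(fd, rule, strlen(rule)) < 0)
		perror("write rule");
	close(fd);
	return 0;
}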
911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 // SPDX-License-Identifier: GPL-2.0-only /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ip.h> #include <net/ipv6.h> #include <net/icmp.h> #include <net/udp.h> #include <net/tcp.h> #include <net/route.h> #include <linux/netfilter.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/xt_LOG.h> #include <net/netfilter/nf_log.h> static const struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = LOGLEVEL_NOTICE, .logflags = NF_LOG_DEFAULT_MASK, }, }, }; struct arppayload { unsigned char mac_src[ETH_ALEN]; unsigned char ip_src[4]; unsigned char mac_dst[ETH_ALEN]; unsigned char ip_dst[4]; }; /* Guard against containers flooding syslog. */ static bool nf_log_allowed(const struct net *net) { return net_eq(net, &init_net) || sysctl_nf_log_all_netns; } static void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb) { u16 vid; if (!skb_vlan_tag_present(skb)) return; vid = skb_vlan_tag_get(skb); nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid); } static void noinline_for_stack dump_arp_packet(struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int nhoff) { const struct arppayload *ap; struct arppayload _arpp; const struct arphdr *ah; unsigned int logflags; struct arphdr _arph; ah = skb_header_pointer(skb, nhoff, sizeof(_arph), &_arph); if (!ah) { nf_log_buf_add(m, "TRUNCATED"); return; } if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; else logflags = NF_LOG_DEFAULT_MASK; if (logflags & NF_LOG_MACDECODE) { nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); nf_log_dump_vlan(m, skb); nf_log_buf_add(m, "MACPROTO=%04x ", ntohs(eth_hdr(skb)->h_proto)); } nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d", ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op)); /* If it's for Ethernet and the lengths are OK, then log the ARP * payload. 
*/ if (ah->ar_hrd != htons(ARPHRD_ETHER) || ah->ar_hln != ETH_ALEN || ah->ar_pln != sizeof(__be32)) return; ap = skb_header_pointer(skb, nhoff + sizeof(_arph), sizeof(_arpp), &_arpp); if (!ap) { nf_log_buf_add(m, " INCOMPLETE [%zu bytes]", skb->len - sizeof(_arph)); return; } nf_log_buf_add(m, " MACSRC=%pM IPSRC=%pI4 MACDST=%pM IPDST=%pI4", ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst); } static void nf_log_dump_packet_common(struct nf_log_buf *m, u8 pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix, struct net *net) { const struct net_device *physoutdev __maybe_unused; const struct net_device *physindev __maybe_unused; nf_log_buf_add(m, KERN_SOH "%c%sIN=%s OUT=%s ", '0' + loginfo->u.log.level, prefix, in ? in->name : "", out ? out->name : ""); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) physindev = nf_bridge_get_physindev(skb, net); if (physindev && in != physindev) nf_log_buf_add(m, "PHYSIN=%s ", physindev->name); physoutdev = nf_bridge_get_physoutdev(skb); if (physoutdev && out != physoutdev) nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name); #endif } static void nf_log_arp_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; if (!nf_log_allowed(net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix, net); dump_arp_packet(m, loginfo, skb, skb_network_offset(skb)); nf_log_buf_close(m); } static struct nf_logger nf_arp_logger __read_mostly = { .name = "nf_log_arp", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_arp_packet, .me = THIS_MODULE, }; static void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m, struct sock *sk) { if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk))) return; read_lock_bh(&sk->sk_callback_lock); if (sk->sk_socket && sk->sk_socket->file) { const struct cred *cred = sk->sk_socket->file->f_cred; nf_log_buf_add(m, "UID=%u GID=%u ", from_kuid_munged(&init_user_ns, cred->fsuid), from_kgid_munged(&init_user_ns, cred->fsgid)); } read_unlock_bh(&sk->sk_callback_lock); } static noinline_for_stack int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, u8 proto, int fragment, unsigned int offset, unsigned int logflags) { struct tcphdr _tcph; const struct tcphdr *th; /* Max length: 10 "PROTO=TCP " */ nf_log_buf_add(m, "PROTO=TCP "); if (fragment) return 0; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph); if (!th) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset); return 1; } /* Max length: 20 "SPT=65535 DPT=65535 " */ nf_log_buf_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest)); /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */ if (logflags & NF_LOG_TCPSEQ) { nf_log_buf_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq)); } /* Max length: 13 "WINDOW=65535 " */ nf_log_buf_add(m, "WINDOW=%u ", ntohs(th->window)); /* Max length: 9 "RES=0x3C " */ nf_log_buf_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22)); /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */ if (th->cwr) nf_log_buf_add(m, "CWR "); if (th->ece) nf_log_buf_add(m, "ECE "); if (th->urg) nf_log_buf_add(m, "URG "); if (th->ack) nf_log_buf_add(m, "ACK "); if (th->psh) 
nf_log_buf_add(m, "PSH "); if (th->rst) nf_log_buf_add(m, "RST "); if (th->syn) nf_log_buf_add(m, "SYN "); if (th->fin) nf_log_buf_add(m, "FIN "); /* Max length: 11 "URGP=65535 " */ nf_log_buf_add(m, "URGP=%u ", ntohs(th->urg_ptr)); if ((logflags & NF_LOG_TCPOPT) && th->doff * 4 > sizeof(struct tcphdr)) { unsigned int optsize = th->doff * 4 - sizeof(struct tcphdr); u8 _opt[60 - sizeof(struct tcphdr)]; unsigned int i; const u8 *op; op = skb_header_pointer(skb, offset + sizeof(struct tcphdr), optsize, _opt); if (!op) { nf_log_buf_add(m, "OPT (TRUNCATED)"); return 1; } /* Max length: 127 "OPT (" 15*4*2chars ") " */ nf_log_buf_add(m, "OPT ("); for (i = 0; i < optsize; i++) nf_log_buf_add(m, "%02X", op[i]); nf_log_buf_add(m, ") "); } return 0; } static noinline_for_stack int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb, u8 proto, int fragment, unsigned int offset) { struct udphdr _udph; const struct udphdr *uh; if (proto == IPPROTO_UDP) /* Max length: 10 "PROTO=UDP " */ nf_log_buf_add(m, "PROTO=UDP "); else /* Max length: 14 "PROTO=UDPLITE " */ nf_log_buf_add(m, "PROTO=UDPLITE "); if (fragment) goto out; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph); if (!uh) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset); return 1; } /* Max length: 20 "SPT=65535 DPT=65535 " */ nf_log_buf_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest), ntohs(uh->len)); out: return 0; } /* One level of recursion won't kill us */ static noinline_for_stack void dump_ipv4_packet(struct net *net, struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int iphoff) { const struct iphdr *ih; unsigned int logflags; struct iphdr _iph; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; else logflags = NF_LOG_DEFAULT_MASK; ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph); if (!ih) { nf_log_buf_add(m, "TRUNCATED"); return; } /* Important fields: * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. 
* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */ nf_log_buf_add(m, "SRC=%pI4 DST=%pI4 ", &ih->saddr, &ih->daddr); /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */ nf_log_buf_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ", iph_totlen(skb, ih), ih->tos & IPTOS_TOS_MASK, ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id)); /* Max length: 6 "CE DF MF " */ if (ntohs(ih->frag_off) & IP_CE) nf_log_buf_add(m, "CE "); if (ntohs(ih->frag_off) & IP_DF) nf_log_buf_add(m, "DF "); if (ntohs(ih->frag_off) & IP_MF) nf_log_buf_add(m, "MF "); /* Max length: 11 "FRAG:65535 " */ if (ntohs(ih->frag_off) & IP_OFFSET) nf_log_buf_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET); if ((logflags & NF_LOG_IPOPT) && ih->ihl * 4 > sizeof(struct iphdr)) { unsigned char _opt[4 * 15 - sizeof(struct iphdr)]; const unsigned char *op; unsigned int i, optsize; optsize = ih->ihl * 4 - sizeof(struct iphdr); op = skb_header_pointer(skb, iphoff + sizeof(_iph), optsize, _opt); if (!op) { nf_log_buf_add(m, "TRUNCATED"); return; } /* Max length: 127 "OPT (" 15*4*2chars ") " */ nf_log_buf_add(m, "OPT ("); for (i = 0; i < optsize; i++) nf_log_buf_add(m, "%02X", op[i]); nf_log_buf_add(m, ") "); } switch (ih->protocol) { case IPPROTO_TCP: if (nf_log_dump_tcp_header(m, skb, ih->protocol, ntohs(ih->frag_off) & IP_OFFSET, iphoff + ih->ihl * 4, logflags)) return; break; case IPPROTO_UDP: case IPPROTO_UDPLITE: if (nf_log_dump_udp_header(m, skb, ih->protocol, ntohs(ih->frag_off) & IP_OFFSET, iphoff + ih->ihl * 4)) return; break; case IPPROTO_ICMP: { static const size_t required_len[NR_ICMP_TYPES + 1] = { [ICMP_ECHOREPLY] = 4, [ICMP_DEST_UNREACH] = 8 + sizeof(struct iphdr), [ICMP_SOURCE_QUENCH] = 8 + sizeof(struct iphdr), [ICMP_REDIRECT] = 8 + sizeof(struct iphdr), [ICMP_ECHO] = 4, [ICMP_TIME_EXCEEDED] = 8 + sizeof(struct iphdr), [ICMP_PARAMETERPROB] = 8 + sizeof(struct iphdr), [ICMP_TIMESTAMP] = 20, [ICMP_TIMESTAMPREPLY] = 20, [ICMP_ADDRESS] = 12, [ICMP_ADDRESSREPLY] = 12 }; const struct icmphdr *ich; struct icmphdr _icmph; /* Max length: 11 "PROTO=ICMP " */ nf_log_buf_add(m, "PROTO=ICMP "); if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ich = skb_header_pointer(skb, iphoff + ih->ihl * 4, sizeof(_icmph), &_icmph); if (!ich) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl * 4); break; } /* Max length: 18 "TYPE=255 CODE=255 " */ nf_log_buf_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code); /* Max length: 25 "INCOMPLETE [65535 bytes] " */ if (ich->type <= NR_ICMP_TYPES && required_len[ich->type] && skb->len - iphoff - ih->ihl * 4 < required_len[ich->type]) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl * 4); break; } switch (ich->type) { case ICMP_ECHOREPLY: case ICMP_ECHO: /* Max length: 19 "ID=65535 SEQ=65535 " */ nf_log_buf_add(m, "ID=%u SEQ=%u ", ntohs(ich->un.echo.id), ntohs(ich->un.echo.sequence)); break; case ICMP_PARAMETERPROB: /* Max length: 14 "PARAMETER=255 " */ nf_log_buf_add(m, "PARAMETER=%u ", ntohl(ich->un.gateway) >> 24); break; case ICMP_REDIRECT: /* Max length: 24 "GATEWAY=255.255.255.255 " */ nf_log_buf_add(m, "GATEWAY=%pI4 ", &ich->un.gateway); fallthrough; case ICMP_DEST_UNREACH: case ICMP_SOURCE_QUENCH: case ICMP_TIME_EXCEEDED: /* Max length: 3+maxlen */ if (!iphoff) { /* Only recurse once. 
*/ nf_log_buf_add(m, "["); dump_ipv4_packet(net, m, info, skb, iphoff + ih->ihl * 4 + sizeof(_icmph)); nf_log_buf_add(m, "] "); } /* Max length: 10 "MTU=65535 " */ if (ich->type == ICMP_DEST_UNREACH && ich->code == ICMP_FRAG_NEEDED) { nf_log_buf_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu)); } } break; } /* Max Length */ case IPPROTO_AH: { const struct ip_auth_hdr *ah; struct ip_auth_hdr _ahdr; if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 9 "PROTO=AH " */ nf_log_buf_add(m, "PROTO=AH "); /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ah = skb_header_pointer(skb, iphoff + ih->ihl * 4, sizeof(_ahdr), &_ahdr); if (!ah) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl * 4); break; } /* Length: 15 "SPI=0xF1234567 " */ nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi)); break; } case IPPROTO_ESP: { const struct ip_esp_hdr *eh; struct ip_esp_hdr _esph; /* Max length: 10 "PROTO=ESP " */ nf_log_buf_add(m, "PROTO=ESP "); if (ntohs(ih->frag_off) & IP_OFFSET) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ eh = skb_header_pointer(skb, iphoff + ih->ihl * 4, sizeof(_esph), &_esph); if (!eh) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - iphoff - ih->ihl * 4); break; } /* Length: 15 "SPI=0xF1234567 " */ nf_log_buf_add(m, "SPI=0x%x ", ntohl(eh->spi)); break; } /* Max length: 10 "PROTO 255 " */ default: nf_log_buf_add(m, "PROTO=%u ", ih->protocol); } /* Max length: 15 "UID=4294967295 " */ if ((logflags & NF_LOG_UID) && !iphoff) nf_log_dump_sk_uid_gid(net, m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (!iphoff && skb->mark) nf_log_buf_add(m, "MARK=0x%x ", skb->mark); /* Proto Max log string length */ /* IP: 40+46+6+11+127 = 230 */ /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */ /* UDP: 10+max(25,20) = 35 */ /* UDPLITE: 14+max(25,20) = 39 */ /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */ /* ESP: 10+max(25)+15 = 50 */ /* AH: 9+max(25)+15 = 49 */ /* unknown: 10 */ /* (ICMP allows recursion one level deep) */ /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */ /* maxlen = 230+ 91 + 230 + 252 = 803 */ } static noinline_for_stack void dump_ipv6_packet(struct net *net, struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb, unsigned int ip6hoff, int recurse) { const struct ipv6hdr *ih; unsigned int hdrlen = 0; unsigned int logflags; struct ipv6hdr _ip6h; unsigned int ptr; u8 currenthdr; int fragment; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; else logflags = NF_LOG_DEFAULT_MASK; ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h); if (!ih) { nf_log_buf_add(m, "TRUNCATED"); return; } /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */ nf_log_buf_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr); /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */ nf_log_buf_add(m, "LEN=%zu TC=%u HOPLIMIT=%u FLOWLBL=%u ", ntohs(ih->payload_len) + sizeof(struct ipv6hdr), (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20, ih->hop_limit, (ntohl(*(__be32 *)ih) & 0x000fffff)); fragment = 0; ptr = ip6hoff + sizeof(struct ipv6hdr); currenthdr = ih->nexthdr; while (currenthdr != NEXTHDR_NONE && nf_ip6_ext_hdr(currenthdr)) { struct ipv6_opt_hdr _hdr; const struct ipv6_opt_hdr *hp; hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr); if (!hp) { nf_log_buf_add(m, "TRUNCATED"); return; } /* Max length: 48 "OPT (...) 
" */ if (logflags & NF_LOG_IPOPT) nf_log_buf_add(m, "OPT ( "); switch (currenthdr) { case IPPROTO_FRAGMENT: { struct frag_hdr _fhdr; const struct frag_hdr *fh; nf_log_buf_add(m, "FRAG:"); fh = skb_header_pointer(skb, ptr, sizeof(_fhdr), &_fhdr); if (!fh) { nf_log_buf_add(m, "TRUNCATED "); return; } /* Max length: 6 "65535 " */ nf_log_buf_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8); /* Max length: 11 "INCOMPLETE " */ if (fh->frag_off & htons(0x0001)) nf_log_buf_add(m, "INCOMPLETE "); nf_log_buf_add(m, "ID:%08x ", ntohl(fh->identification)); if (ntohs(fh->frag_off) & 0xFFF8) fragment = 1; hdrlen = 8; break; } case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: case IPPROTO_HOPOPTS: if (fragment) { if (logflags & NF_LOG_IPOPT) nf_log_buf_add(m, ")"); return; } hdrlen = ipv6_optlen(hp); break; /* Max Length */ case IPPROTO_AH: if (logflags & NF_LOG_IPOPT) { struct ip_auth_hdr _ahdr; const struct ip_auth_hdr *ah; /* Max length: 3 "AH " */ nf_log_buf_add(m, "AH "); if (fragment) { nf_log_buf_add(m, ")"); return; } ah = skb_header_pointer(skb, ptr, sizeof(_ahdr), &_ahdr); if (!ah) { /* Max length: 26 "INCOMPLETE [65535 bytes] )" */ nf_log_buf_add(m, "INCOMPLETE [%u bytes] )", skb->len - ptr); return; } /* Length: 15 "SPI=0xF1234567 */ nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi)); } hdrlen = ipv6_authlen(hp); break; case IPPROTO_ESP: if (logflags & NF_LOG_IPOPT) { struct ip_esp_hdr _esph; const struct ip_esp_hdr *eh; /* Max length: 4 "ESP " */ nf_log_buf_add(m, "ESP "); if (fragment) { nf_log_buf_add(m, ")"); return; } /* Max length: 26 "INCOMPLETE [65535 bytes] )" */ eh = skb_header_pointer(skb, ptr, sizeof(_esph), &_esph); if (!eh) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] )", skb->len - ptr); return; } /* Length: 16 "SPI=0xF1234567 )" */ nf_log_buf_add(m, "SPI=0x%x )", ntohl(eh->spi)); } return; default: /* Max length: 20 "Unknown Ext Hdr 255" */ nf_log_buf_add(m, "Unknown Ext Hdr %u", currenthdr); return; } if (logflags & NF_LOG_IPOPT) nf_log_buf_add(m, ") "); currenthdr = hp->nexthdr; ptr += hdrlen; } switch (currenthdr) { case IPPROTO_TCP: if (nf_log_dump_tcp_header(m, skb, currenthdr, fragment, ptr, logflags)) return; break; case IPPROTO_UDP: case IPPROTO_UDPLITE: if (nf_log_dump_udp_header(m, skb, currenthdr, fragment, ptr)) return; break; case IPPROTO_ICMPV6: { struct icmp6hdr _icmp6h; const struct icmp6hdr *ic; /* Max length: 13 "PROTO=ICMPv6 " */ nf_log_buf_add(m, "PROTO=ICMPv6 "); if (fragment) break; /* Max length: 25 "INCOMPLETE [65535 bytes] " */ ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h); if (!ic) { nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr); return; } /* Max length: 18 "TYPE=255 CODE=255 " */ nf_log_buf_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code); switch (ic->icmp6_type) { case ICMPV6_ECHO_REQUEST: case ICMPV6_ECHO_REPLY: /* Max length: 19 "ID=65535 SEQ=65535 " */ nf_log_buf_add(m, "ID=%u SEQ=%u ", ntohs(ic->icmp6_identifier), ntohs(ic->icmp6_sequence)); break; case ICMPV6_MGM_QUERY: case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: break; case ICMPV6_PARAMPROB: /* Max length: 17 "POINTER=ffffffff " */ nf_log_buf_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer)); fallthrough; case ICMPV6_DEST_UNREACH: case ICMPV6_PKT_TOOBIG: case ICMPV6_TIME_EXCEED: /* Max length: 3+maxlen */ if (recurse) { nf_log_buf_add(m, "["); dump_ipv6_packet(net, m, info, skb, ptr + sizeof(_icmp6h), 0); nf_log_buf_add(m, "] "); } /* Max length: 10 "MTU=65535 " */ if (ic->icmp6_type == ICMPV6_PKT_TOOBIG) { nf_log_buf_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu)); } } 
break; } /* Max length: 10 "PROTO=255 " */ default: nf_log_buf_add(m, "PROTO=%u ", currenthdr); } /* Max length: 15 "UID=4294967295 " */ if ((logflags & NF_LOG_UID) && recurse) nf_log_dump_sk_uid_gid(net, m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (recurse && skb->mark) nf_log_buf_add(m, "MARK=0x%x ", skb->mark); } static void dump_mac_header(struct nf_log_buf *m, const struct nf_loginfo *info, const struct sk_buff *skb) { struct net_device *dev = skb->dev; unsigned int logflags = 0; if (info->type == NF_LOG_TYPE_LOG) logflags = info->u.log.logflags; if (!(logflags & NF_LOG_MACDECODE)) goto fallback; switch (dev->type) { case ARPHRD_ETHER: nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ", eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest); nf_log_dump_vlan(m, skb); nf_log_buf_add(m, "MACPROTO=%04x ", ntohs(eth_hdr(skb)->h_proto)); return; default: break; } fallback: nf_log_buf_add(m, "MAC="); if (dev->hard_header_len && skb->mac_header != skb->network_header) { const unsigned char *p = skb_mac_header(skb); unsigned int i; if (dev->type == ARPHRD_SIT) { p -= ETH_HLEN; if (p < skb->head) p = NULL; } if (p) { nf_log_buf_add(m, "%02x", *p++); for (i = 1; i < dev->hard_header_len; i++) nf_log_buf_add(m, ":%02x", *p++); } if (dev->type == ARPHRD_SIT) { const struct iphdr *iph = (struct iphdr *)skb_mac_header(skb); nf_log_buf_add(m, " TUNNEL=%pI4->%pI4", &iph->saddr, &iph->daddr); } } nf_log_buf_add(m, " "); } static void nf_log_ip_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; if (!nf_log_allowed(net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix, net); if (in) dump_mac_header(m, loginfo, skb); dump_ipv4_packet(net, m, loginfo, skb, skb_network_offset(skb)); nf_log_buf_close(m); } static struct nf_logger nf_ip_logger __read_mostly = { .name = "nf_log_ipv4", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_ip_packet, .me = THIS_MODULE, }; static void nf_log_ip6_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; if (!nf_log_allowed(net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix, net); if (in) dump_mac_header(m, loginfo, skb); dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1); nf_log_buf_close(m); } static struct nf_logger nf_ip6_logger __read_mostly = { .name = "nf_log_ipv6", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_ip6_packet, .me = THIS_MODULE, }; static void nf_log_unknown_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) { struct nf_log_buf *m; if (!nf_log_allowed(net)) return; m = nf_log_buf_open(); if (!loginfo) loginfo = &default_loginfo; nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix, net); dump_mac_header(m, loginfo, skb); nf_log_buf_close(m); } static void nf_log_netdev_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *prefix) 
{ switch (skb->protocol) { case htons(ETH_P_IP): nf_log_ip_packet(net, pf, hooknum, skb, in, out, loginfo, prefix); break; case htons(ETH_P_IPV6): nf_log_ip6_packet(net, pf, hooknum, skb, in, out, loginfo, prefix); break; case htons(ETH_P_ARP): case htons(ETH_P_RARP): nf_log_arp_packet(net, pf, hooknum, skb, in, out, loginfo, prefix); break; default: nf_log_unknown_packet(net, pf, hooknum, skb, in, out, loginfo, prefix); break; } } static struct nf_logger nf_netdev_logger __read_mostly = { .name = "nf_log_netdev", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_netdev_packet, .me = THIS_MODULE, }; static struct nf_logger nf_bridge_logger __read_mostly = { .name = "nf_log_bridge", .type = NF_LOG_TYPE_LOG, .logfn = nf_log_netdev_packet, .me = THIS_MODULE, }; static int __net_init nf_log_syslog_net_init(struct net *net) { int ret = nf_log_set(net, NFPROTO_IPV4, &nf_ip_logger); if (ret) return ret; ret = nf_log_set(net, NFPROTO_ARP, &nf_arp_logger); if (ret) goto err1; ret = nf_log_set(net, NFPROTO_IPV6, &nf_ip6_logger); if (ret) goto err2; ret = nf_log_set(net, NFPROTO_NETDEV, &nf_netdev_logger); if (ret) goto err3; ret = nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger); if (ret) goto err4; return 0; err4: nf_log_unset(net, &nf_netdev_logger); err3: nf_log_unset(net, &nf_ip6_logger); err2: nf_log_unset(net, &nf_arp_logger); err1: nf_log_unset(net, &nf_ip_logger); return ret; } static void __net_exit nf_log_syslog_net_exit(struct net *net) { nf_log_unset(net, &nf_ip_logger); nf_log_unset(net, &nf_arp_logger); nf_log_unset(net, &nf_ip6_logger); nf_log_unset(net, &nf_netdev_logger); nf_log_unset(net, &nf_bridge_logger); } static struct pernet_operations nf_log_syslog_net_ops = { .init = nf_log_syslog_net_init, .exit = nf_log_syslog_net_exit, }; static int __init nf_log_syslog_init(void) { int ret; ret = register_pernet_subsys(&nf_log_syslog_net_ops); if (ret < 0) return ret; ret = nf_log_register(NFPROTO_IPV4, &nf_ip_logger); if (ret < 0) goto err1; ret = nf_log_register(NFPROTO_ARP, &nf_arp_logger); if (ret < 0) goto err2; ret = nf_log_register(NFPROTO_IPV6, &nf_ip6_logger); if (ret < 0) goto err3; ret = nf_log_register(NFPROTO_NETDEV, &nf_netdev_logger); if (ret < 0) goto err4; ret = nf_log_register(NFPROTO_BRIDGE, &nf_bridge_logger); if (ret < 0) goto err5; return 0; err5: nf_log_unregister(&nf_netdev_logger); err4: nf_log_unregister(&nf_ip6_logger); err3: nf_log_unregister(&nf_arp_logger); err2: nf_log_unregister(&nf_ip_logger); err1: pr_err("failed to register logger\n"); unregister_pernet_subsys(&nf_log_syslog_net_ops); return ret; } static void __exit nf_log_syslog_exit(void) { unregister_pernet_subsys(&nf_log_syslog_net_ops); nf_log_unregister(&nf_ip_logger); nf_log_unregister(&nf_arp_logger); nf_log_unregister(&nf_ip6_logger); nf_log_unregister(&nf_netdev_logger); nf_log_unregister(&nf_bridge_logger); } module_init(nf_log_syslog_init); module_exit(nf_log_syslog_exit); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("Netfilter syslog packet logging"); MODULE_LICENSE("GPL"); MODULE_ALIAS("nf_log_arp"); MODULE_ALIAS("nf_log_bridge"); MODULE_ALIAS("nf_log_ipv4"); MODULE_ALIAS("nf_log_ipv6"); MODULE_ALIAS("nf_log_netdev"); MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 0); MODULE_ALIAS_NF_LOGGER(AF_INET, 0); MODULE_ALIAS_NF_LOGGER(3, 0); MODULE_ALIAS_NF_LOGGER(5, 0); /* NFPROTO_NETDEV */ MODULE_ALIAS_NF_LOGGER(AF_INET6, 0);
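/*
 * Illustrative sketch (not part of nf_log_syslog.c): the IPv4 logger above
 * emits one syslog line per packet whose field order follows
 * nf_log_dump_packet_common() and dump_ipv4_packet()/nf_log_dump_tcp_header().
 * The sample values below are made up; only the field layout mirrors the
 * format strings in the code.
 */
#include <stdio.h>

int main(void)
{
	/* Header fields from nf_log_dump_packet_common(). */
	printf("IN=%s OUT=%s ", "eth0", "");
	/* IPv4 fields from dump_ipv4_packet(), then the TCP fields. */
	printf("SRC=%s DST=%s ", "192.0.2.1", "192.0.2.2");
	printf("LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ", 60, 0, 0, 64, 54321);
	printf("PROTO=TCP SPT=%u DPT=%u WINDOW=%u RES=0x%02x SYN URGP=%u\n",
	       12345, 80, 65535, 0, 0);
	return 0;
}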
6 6 1 5 6 3 2 2 1 1 1 2 2 1 1 1 1 1 2 1 1 1 2 2 2 1 1 11 11 3 2 1 2 8 1 11 11 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2020 Intel Corporation */ #include <crypto/algapi.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/bitops.h> #include <linux/pci.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_cfg_common.h" #include "adf_cfg_user.h" #define ADF_CFG_MAX_SECTION 512 #define ADF_CFG_MAX_KEY_VAL 256 #define DEVICE_NAME "qat_adf_ctl" static DEFINE_MUTEX(adf_ctl_lock); static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); static const struct file_operations adf_ctl_ops = { .owner = THIS_MODULE, .unlocked_ioctl = adf_ctl_ioctl, .compat_ioctl = compat_ptr_ioctl, }; static const struct class adf_ctl_class = { .name = DEVICE_NAME, }; struct adf_ctl_drv_info { unsigned int major; struct cdev drv_cdev; }; static struct adf_ctl_drv_info adf_ctl_drv; static void adf_chr_drv_destroy(void) { device_destroy(&adf_ctl_class, MKDEV(adf_ctl_drv.major, 0)); cdev_del(&adf_ctl_drv.drv_cdev); class_unregister(&adf_ctl_class); unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1); } static int adf_chr_drv_create(void) { dev_t dev_id; struct device *drv_device; int ret; if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) { pr_err("QAT: unable to allocate chrdev region\n"); return -EFAULT; } ret = class_register(&adf_ctl_class); if (ret) goto err_chrdev_unreg; adf_ctl_drv.major = MAJOR(dev_id); cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops); if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) { pr_err("QAT: cdev add 
failed\n"); goto err_class_destr; } drv_device = device_create(&adf_ctl_class, NULL, MKDEV(adf_ctl_drv.major, 0), NULL, DEVICE_NAME); if (IS_ERR(drv_device)) { pr_err("QAT: failed to create device\n"); goto err_cdev_del; } return 0; err_cdev_del: cdev_del(&adf_ctl_drv.drv_cdev); err_class_destr: class_unregister(&adf_ctl_class); err_chrdev_unreg: unregister_chrdev_region(dev_id, 1); return -EFAULT; } static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data, unsigned long arg) { struct adf_user_cfg_ctl_data *cfg_data; cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL); if (!cfg_data) return -ENOMEM; /* Initialize device id to NO DEVICE as 0 is a valid device id */ cfg_data->device_id = ADF_CFG_NO_DEVICE; if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) { pr_err("QAT: failed to copy from user cfg_data.\n"); kfree(cfg_data); return -EIO; } *ctl_data = cfg_data; return 0; } static int adf_add_key_value_data(struct adf_accel_dev *accel_dev, const char *section, const struct adf_user_cfg_key_val *key_val) { if (key_val->type == ADF_HEX) { long *ptr = (long *)key_val->val; long val = *ptr; if (adf_cfg_add_key_value_param(accel_dev, section, key_val->key, (void *)val, key_val->type)) { dev_err(&GET_DEV(accel_dev), "failed to add hex keyvalue.\n"); return -EFAULT; } } else { if (adf_cfg_add_key_value_param(accel_dev, section, key_val->key, key_val->val, key_val->type)) { dev_err(&GET_DEV(accel_dev), "failed to add keyvalue.\n"); return -EFAULT; } } return 0; } static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev, struct adf_user_cfg_ctl_data *ctl_data) { struct adf_user_cfg_key_val key_val; struct adf_user_cfg_key_val *params_head; struct adf_user_cfg_section section, *section_head; int i, j; section_head = ctl_data->config_section; for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) { if (copy_from_user(&section, (void __user *)section_head, sizeof(*section_head))) { dev_err(&GET_DEV(accel_dev), "failed to copy section info\n"); goto out_err; } if (adf_cfg_section_add(accel_dev, section.name)) { dev_err(&GET_DEV(accel_dev), "failed to add section.\n"); goto out_err; } params_head = section.params; for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) { if (copy_from_user(&key_val, (void __user *)params_head, sizeof(key_val))) { dev_err(&GET_DEV(accel_dev), "Failed to copy keyvalue.\n"); goto out_err; } if (adf_add_key_value_data(accel_dev, section.name, &key_val)) { goto out_err; } params_head = key_val.next; } section_head = section.next; } return 0; out_err: adf_cfg_del_all(accel_dev); return -EFAULT; } static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd, unsigned long arg) { int ret; struct adf_user_cfg_ctl_data *ctl_data; struct adf_accel_dev *accel_dev; ret = adf_ctl_alloc_resources(&ctl_data, arg); if (ret) return ret; accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); if (!accel_dev) { ret = -EFAULT; goto out; } if (adf_dev_started(accel_dev)) { ret = -EFAULT; goto out; } if (adf_copy_key_value_data(accel_dev, ctl_data)) { ret = -EFAULT; goto out; } set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); out: kfree(ctl_data); return ret; } static int adf_ctl_is_device_in_use(int id) { struct adf_accel_dev *dev; list_for_each_entry(dev, adf_devmgr_get_head(), list) { if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) { if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) { dev_info(&GET_DEV(dev), "device qat_dev%d is busy\n", dev->accel_id); return -EBUSY; } } } return 0; } static void 
adf_ctl_stop_devices(u32 id) { struct adf_accel_dev *accel_dev; list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) { if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { if (!adf_dev_started(accel_dev)) continue; /* First stop all VFs */ if (!accel_dev->is_vf) continue; adf_dev_down(accel_dev); } } list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) { if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) { if (!adf_dev_started(accel_dev)) continue; adf_dev_down(accel_dev); } } } static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd, unsigned long arg) { int ret; struct adf_user_cfg_ctl_data *ctl_data; ret = adf_ctl_alloc_resources(&ctl_data, arg); if (ret) return ret; if (adf_devmgr_verify_id(ctl_data->device_id)) { pr_err("QAT: Device %d not found\n", ctl_data->device_id); ret = -ENODEV; goto out; } ret = adf_ctl_is_device_in_use(ctl_data->device_id); if (ret) goto out; if (ctl_data->device_id == ADF_CFG_ALL_DEVICES) pr_info("QAT: Stopping all acceleration devices.\n"); else pr_info("QAT: Stopping acceleration device qat_dev%d.\n", ctl_data->device_id); adf_ctl_stop_devices(ctl_data->device_id); out: kfree(ctl_data); return ret; } static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd, unsigned long arg) { int ret; struct adf_user_cfg_ctl_data *ctl_data; struct adf_accel_dev *accel_dev; ret = adf_ctl_alloc_resources(&ctl_data, arg); if (ret) return ret; ret = -ENODEV; accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id); if (!accel_dev) goto out; dev_info(&GET_DEV(accel_dev), "Starting acceleration device qat_dev%d.\n", ctl_data->device_id); ret = adf_dev_up(accel_dev, false); if (ret) { dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n", ctl_data->device_id); adf_dev_down(accel_dev); } out: kfree(ctl_data); return ret; } static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd, unsigned long arg) { u32 num_devices = 0; adf_devmgr_get_num_dev(&num_devices); if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices))) return -EFAULT; return 0; } static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd, unsigned long arg) { struct adf_hw_device_data *hw_data; struct adf_dev_status_info dev_info; struct adf_accel_dev *accel_dev; if (copy_from_user(&dev_info, (void __user *)arg, sizeof(struct adf_dev_status_info))) { pr_err("QAT: failed to copy from user.\n"); return -EFAULT; } accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id); if (!accel_dev) return -ENODEV; hw_data = accel_dev->hw_device; dev_info.state = adf_dev_started(accel_dev) ? 
DEV_UP : DEV_DOWN; dev_info.num_ae = hw_data->get_num_aes(hw_data); dev_info.num_accel = hw_data->get_num_accels(hw_data); dev_info.num_logical_accel = hw_data->num_logical_accel; dev_info.banks_per_accel = hw_data->num_banks / hw_data->num_logical_accel; strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name)); dev_info.instance_id = hw_data->instance_id; dev_info.type = hw_data->dev_class->type; dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number; dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn); dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn); if (copy_to_user((void __user *)arg, &dev_info, sizeof(struct adf_dev_status_info))) { dev_err(&GET_DEV(accel_dev), "failed to copy status.\n"); return -EFAULT; } return 0; } static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { int ret; if (mutex_lock_interruptible(&adf_ctl_lock)) return -EFAULT; switch (cmd) { case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS: ret = adf_ctl_ioctl_dev_config(fp, cmd, arg); break; case IOCTL_STOP_ACCEL_DEV: ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg); break; case IOCTL_START_ACCEL_DEV: ret = adf_ctl_ioctl_dev_start(fp, cmd, arg); break; case IOCTL_GET_NUM_DEVICES: ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg); break; case IOCTL_STATUS_ACCEL_DEV: ret = adf_ctl_ioctl_get_status(fp, cmd, arg); break; default: pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd); ret = -EFAULT; break; } mutex_unlock(&adf_ctl_lock); return ret; } static int __init adf_register_ctl_device_driver(void) { if (adf_chr_drv_create()) goto err_chr_dev; if (adf_init_misc_wq()) goto err_misc_wq; if (adf_init_aer()) goto err_aer; if (adf_init_pf_wq()) goto err_pf_wq; if (adf_init_vf_wq()) goto err_vf_wq; if (qat_crypto_register()) goto err_crypto_register; if (qat_compression_register()) goto err_compression_register; return 0; err_compression_register: qat_crypto_unregister(); err_crypto_register: adf_exit_vf_wq(); err_vf_wq: adf_exit_pf_wq(); err_pf_wq: adf_exit_aer(); err_aer: adf_exit_misc_wq(); err_misc_wq: adf_chr_drv_destroy(); err_chr_dev: mutex_destroy(&adf_ctl_lock); return -EFAULT; } static void __exit adf_unregister_ctl_device_driver(void) { adf_chr_drv_destroy(); adf_exit_misc_wq(); adf_exit_aer(); adf_exit_vf_wq(); adf_exit_pf_wq(); qat_crypto_unregister(); qat_compression_unregister(); adf_clean_vf_map(false); mutex_destroy(&adf_ctl_lock); } module_init(adf_register_ctl_device_driver); module_exit(adf_unregister_ctl_device_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_ALIAS_CRYPTO("intel_qat"); MODULE_VERSION(ADF_DRV_VERSION); MODULE_IMPORT_NS("CRYPTO_INTERNAL");
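The ioctl dispatcher above exposes the QAT control interface through the qat_adf_ctl character device. As a hedged illustration only, the userspace sketch below queries the device count the way adf_ctl_ioctl_get_num_devices() expects. It assumes the IOCTL_GET_NUM_DEVICES command macro is made available to userspace (for example by reusing the driver's adf_cfg_user.h definitions) and that the device node shows up as /dev/qat_adf_ctl; neither assumption comes from this file.

/*
 * Hypothetical userspace sketch, not part of the driver: ask the control
 * device how many acceleration devices are registered. IOCTL_GET_NUM_DEVICES
 * is assumed to come from the driver's cfg headers; /dev/qat_adf_ctl is the
 * node that would correspond to DEVICE_NAME above.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "adf_cfg_user.h"	/* assumed to provide IOCTL_GET_NUM_DEVICES */

int main(void)
{
	uint32_t num_devices = 0;
	int fd = open("/dev/qat_adf_ctl", O_RDWR);

	if (fd < 0) {
		perror("open /dev/qat_adf_ctl");
		return 1;
	}
	/* The handler copies a u32 count back with copy_to_user(). */
	if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices) < 0) {
		perror("IOCTL_GET_NUM_DEVICES");
		close(fd);
		return 1;
	}
	printf("QAT devices: %u\n", num_devices);
	close(fd);
	return 0;
}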
/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Copyright (C) 2011 Novell Inc. * Copyright (C) 2016 Red Hat, Inc. */ struct ovl_config { char *upperdir; char *workdir; char **lowerdirs; bool default_permissions; int redirect_mode; int verity_mode; bool index; int uuid; bool nfs_export; int xino; bool metacopy; bool userxattr; bool ovl_volatile; }; struct ovl_sb { struct super_block *sb; dev_t pseudo_dev; /* Unusable (conflicting) uuid */ bool bad_uuid; /* Used as a lower layer (but maybe also as upper) */ bool is_lower; }; struct ovl_layer { /* ovl_free_fs() relies on @mnt being the first member! */ struct vfsmount *mnt; /* Trap in ovl inode cache */ struct inode *trap; struct ovl_sb *fs; /* Index of this layer in fs root (upper idx == 0) */ int idx; /* One fsid per unique underlying sb (upper fsid == 0) */ int fsid; /* xwhiteouts were found on this layer */ bool has_xwhiteouts; }; struct ovl_path { const struct ovl_layer *layer; struct dentry *dentry; }; struct ovl_entry { unsigned int __numlower; struct ovl_path __lowerstack[]; }; /* private information held for overlayfs's superblock */ struct ovl_fs { unsigned int numlayer; /* Number of unique fs among layers including upper fs */ unsigned int numfs; /* Number of data-only lower layers */ unsigned int numdatalayer; struct ovl_layer *layers; struct ovl_sb *fs; /* workbasedir is the path at workdir= mount option */ struct dentry *workbasedir; /* workdir is the 'work' or 'index' directory under workbasedir */ struct dentry *workdir; long namelen; /* pathnames of lower and upper dirs, for show_options */ struct ovl_config config; /* creds of process who forced instantiation of super block */ const struct cred *creator_cred; bool tmpfile; bool noxattr; bool nofh; /* Did we take the inuse lock? 
*/ bool upperdir_locked; bool workdir_locked; /* Traps in ovl inode cache */ struct inode *workbasedir_trap; struct inode *workdir_trap; /* -1: disabled, 0: same fs, 1..32: number of unused ino bits */ int xino_mode; /* For allocation of non-persistent inode numbers */ atomic_long_t last_ino; /* Shared whiteout cache */ struct dentry *whiteout; bool no_shared_whiteout; /* r/o snapshot of upperdir sb's only taken on volatile mounts */ errseq_t errseq; }; /* Number of lower layers, not including data-only layers */ static inline unsigned int ovl_numlowerlayer(struct ovl_fs *ofs) { return ofs->numlayer - ofs->numdatalayer - 1; } static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs) { return ofs->layers[0].mnt; } static inline struct mnt_idmap *ovl_upper_mnt_idmap(struct ovl_fs *ofs) { return mnt_idmap(ovl_upper_mnt(ofs)); } extern struct file_system_type ovl_fs_type; static inline struct ovl_fs *OVL_FS(struct super_block *sb) { if (IS_ENABLED(CONFIG_OVERLAY_FS_DEBUG)) WARN_ON_ONCE(sb->s_type != &ovl_fs_type); return (struct ovl_fs *)sb->s_fs_info; } static inline bool ovl_should_sync(struct ovl_fs *ofs) { return !ofs->config.ovl_volatile; } static inline unsigned int ovl_numlower(struct ovl_entry *oe) { return oe ? oe->__numlower : 0; } static inline struct ovl_path *ovl_lowerstack(struct ovl_entry *oe) { return ovl_numlower(oe) ? oe->__lowerstack : NULL; } static inline struct ovl_path *ovl_lowerpath(struct ovl_entry *oe) { return ovl_lowerstack(oe); } static inline struct ovl_path *ovl_lowerdata(struct ovl_entry *oe) { struct ovl_path *lowerstack = ovl_lowerstack(oe); return lowerstack ? &lowerstack[oe->__numlower - 1] : NULL; } /* May return NULL if lazy lookup of lowerdata is needed */ static inline struct dentry *ovl_lowerdata_dentry(struct ovl_entry *oe) { struct ovl_path *lowerdata = ovl_lowerdata(oe); return lowerdata ? READ_ONCE(lowerdata->dentry) : NULL; } /* private information held for every overlayfs dentry */ static inline unsigned long *OVL_E_FLAGS(struct dentry *dentry) { return (unsigned long *) &dentry->d_fsdata; } struct ovl_inode { union { struct ovl_dir_cache *cache; /* directory */ const char *lowerdata_redirect; /* regular file */ }; const char *redirect; u64 version; unsigned long flags; struct inode vfs_inode; struct dentry *__upperdentry; struct ovl_entry *oe; /* synchronize copy up and more */ struct mutex lock; }; static inline struct ovl_inode *OVL_I(struct inode *inode) { return container_of(inode, struct ovl_inode, vfs_inode); } static inline struct ovl_entry *OVL_I_E(struct inode *inode) { return inode ? OVL_I(inode)->oe : NULL; } static inline struct ovl_entry *OVL_E(struct dentry *dentry) { return OVL_I_E(d_inode(dentry)); } static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi) { return READ_ONCE(oi->__upperdentry); }
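OVL_I() above recovers the ovl_inode that embeds a given VFS inode via container_of(). The standalone sketch below uses toy types rather than the kernel's, so the offsetof-based pointer arithmetic behind OVL_I()/OVL_I_E() can be seen in isolation; it is an illustration, not overlayfs code.

/*
 * Standalone illustration (not kernel code) of the container_of idiom used
 * by OVL_I(): given a pointer to an embedded member, recover the pointer to
 * the enclosing structure by subtracting the member's offset.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_inode { int ino; };

struct toy_ovl_inode {
	unsigned long flags;
	struct toy_inode vfs_inode;	/* embedded, like ovl_inode.vfs_inode */
};

static struct toy_ovl_inode *TOY_OVL_I(struct toy_inode *inode)
{
	return container_of(inode, struct toy_ovl_inode, vfs_inode);
}

int main(void)
{
	struct toy_ovl_inode oi = { .flags = 0x1, .vfs_inode = { .ino = 42 } };
	struct toy_inode *vfs = &oi.vfs_inode;

	/* Recover the containing toy_ovl_inode from the embedded member. */
	printf("flags=%lx ino=%d\n", TOY_OVL_I(vfs)->flags, TOY_OVL_I(vfs)->ino);
	return 0;
}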
// SPDX-License-Identifier: 
GPL-2.0-or-later /* * Cryptographic API. * * DES & Triple DES EDE Cipher Algorithms. * * Copyright (c) 2005 Dag Arne Osvik <da@osvik.no> */ #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/crypto.h> #include <linux/errno.h> #include <linux/fips.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <linux/unaligned.h> #include <crypto/des.h> #include <crypto/internal/des.h> #define ROL(x, r) ((x) = rol32((x), (r))) #define ROR(x, r) ((x) = ror32((x), (r))) /* Lookup tables for key expansion */ static const u8 pc1[256] = { 0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14, 0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54, 0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16, 0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56, 0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c, 0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c, 0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e, 0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e, 0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34, 0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74, 0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36, 0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76, 0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c, 0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c, 0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e, 0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e, 0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94, 0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4, 0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96, 0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6, 0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c, 0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc, 0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e, 0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde, 0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4, 0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4, 0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6, 0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6, 0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc, 0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc, 0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe, 0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe }; static const u8 rs[256] = { 0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82, 0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86, 0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a, 0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e, 0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92, 0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96, 0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a, 0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e, 0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2, 0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6, 0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa, 0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae, 0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2, 0x34, 0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6, 0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba, 0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe, 0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2, 0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6, 0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca, 0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce, 0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2, 0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6, 0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda, 0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde, 0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2, 0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6, 0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea, 0x6c, 0x6c, 0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee, 0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 
0xf2, 0xf2, 0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6, 0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa, 0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe }; static const u32 pc2[1024] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00040000, 0x00000000, 0x04000000, 0x00100000, 0x00400000, 0x00000008, 0x00000800, 0x40000000, 0x00440000, 0x00000008, 0x04000800, 0x40100000, 0x00000400, 0x00000020, 0x08000000, 0x00000100, 0x00040400, 0x00000020, 0x0c000000, 0x00100100, 0x00400400, 0x00000028, 0x08000800, 0x40000100, 0x00440400, 0x00000028, 0x0c000800, 0x40100100, 0x80000000, 0x00000010, 0x00000000, 0x00800000, 0x80040000, 0x00000010, 0x04000000, 0x00900000, 0x80400000, 0x00000018, 0x00000800, 0x40800000, 0x80440000, 0x00000018, 0x04000800, 0x40900000, 0x80000400, 0x00000030, 0x08000000, 0x00800100, 0x80040400, 0x00000030, 0x0c000000, 0x00900100, 0x80400400, 0x00000038, 0x08000800, 0x40800100, 0x80440400, 0x00000038, 0x0c000800, 0x40900100, 0x10000000, 0x00000000, 0x00200000, 0x00001000, 0x10040000, 0x00000000, 0x04200000, 0x00101000, 0x10400000, 0x00000008, 0x00200800, 0x40001000, 0x10440000, 0x00000008, 0x04200800, 0x40101000, 0x10000400, 0x00000020, 0x08200000, 0x00001100, 0x10040400, 0x00000020, 0x0c200000, 0x00101100, 0x10400400, 0x00000028, 0x08200800, 0x40001100, 0x10440400, 0x00000028, 0x0c200800, 0x40101100, 0x90000000, 0x00000010, 0x00200000, 0x00801000, 0x90040000, 0x00000010, 0x04200000, 0x00901000, 0x90400000, 0x00000018, 0x00200800, 0x40801000, 0x90440000, 0x00000018, 0x04200800, 0x40901000, 0x90000400, 0x00000030, 0x08200000, 0x00801100, 0x90040400, 0x00000030, 0x0c200000, 0x00901100, 0x90400400, 0x00000038, 0x08200800, 0x40801100, 0x90440400, 0x00000038, 0x0c200800, 0x40901100, 0x00000200, 0x00080000, 0x00000000, 0x00000004, 0x00040200, 0x00080000, 0x04000000, 0x00100004, 0x00400200, 0x00080008, 0x00000800, 0x40000004, 0x00440200, 0x00080008, 0x04000800, 0x40100004, 0x00000600, 0x00080020, 0x08000000, 0x00000104, 0x00040600, 0x00080020, 0x0c000000, 0x00100104, 0x00400600, 0x00080028, 0x08000800, 0x40000104, 0x00440600, 0x00080028, 0x0c000800, 0x40100104, 0x80000200, 0x00080010, 0x00000000, 0x00800004, 0x80040200, 0x00080010, 0x04000000, 0x00900004, 0x80400200, 0x00080018, 0x00000800, 0x40800004, 0x80440200, 0x00080018, 0x04000800, 0x40900004, 0x80000600, 0x00080030, 0x08000000, 0x00800104, 0x80040600, 0x00080030, 0x0c000000, 0x00900104, 0x80400600, 0x00080038, 0x08000800, 0x40800104, 0x80440600, 0x00080038, 0x0c000800, 0x40900104, 0x10000200, 0x00080000, 0x00200000, 0x00001004, 0x10040200, 0x00080000, 0x04200000, 0x00101004, 0x10400200, 0x00080008, 0x00200800, 0x40001004, 0x10440200, 0x00080008, 0x04200800, 0x40101004, 0x10000600, 0x00080020, 0x08200000, 0x00001104, 0x10040600, 0x00080020, 0x0c200000, 0x00101104, 0x10400600, 0x00080028, 0x08200800, 0x40001104, 0x10440600, 0x00080028, 0x0c200800, 0x40101104, 0x90000200, 0x00080010, 0x00200000, 0x00801004, 0x90040200, 0x00080010, 0x04200000, 0x00901004, 0x90400200, 0x00080018, 0x00200800, 0x40801004, 0x90440200, 0x00080018, 0x04200800, 0x40901004, 0x90000600, 0x00080030, 0x08200000, 0x00801104, 0x90040600, 0x00080030, 0x0c200000, 0x00901104, 0x90400600, 0x00080038, 0x08200800, 0x40801104, 0x90440600, 0x00080038, 0x0c200800, 0x40901104, 0x00000002, 0x00002000, 0x20000000, 0x00000001, 0x00040002, 0x00002000, 0x24000000, 0x00100001, 0x00400002, 0x00002008, 0x20000800, 0x40000001, 0x00440002, 0x00002008, 0x24000800, 0x40100001, 0x00000402, 0x00002020, 0x28000000, 0x00000101, 0x00040402, 0x00002020, 0x2c000000, 0x00100101, 
0x00400402, 0x00002028, 0x28000800, 0x40000101, 0x00440402, 0x00002028, 0x2c000800, 0x40100101, 0x80000002, 0x00002010, 0x20000000, 0x00800001, 0x80040002, 0x00002010, 0x24000000, 0x00900001, 0x80400002, 0x00002018, 0x20000800, 0x40800001, 0x80440002, 0x00002018, 0x24000800, 0x40900001, 0x80000402, 0x00002030, 0x28000000, 0x00800101, 0x80040402, 0x00002030, 0x2c000000, 0x00900101, 0x80400402, 0x00002038, 0x28000800, 0x40800101, 0x80440402, 0x00002038, 0x2c000800, 0x40900101, 0x10000002, 0x00002000, 0x20200000, 0x00001001, 0x10040002, 0x00002000, 0x24200000, 0x00101001, 0x10400002, 0x00002008, 0x20200800, 0x40001001, 0x10440002, 0x00002008, 0x24200800, 0x40101001, 0x10000402, 0x00002020, 0x28200000, 0x00001101, 0x10040402, 0x00002020, 0x2c200000, 0x00101101, 0x10400402, 0x00002028, 0x28200800, 0x40001101, 0x10440402, 0x00002028, 0x2c200800, 0x40101101, 0x90000002, 0x00002010, 0x20200000, 0x00801001, 0x90040002, 0x00002010, 0x24200000, 0x00901001, 0x90400002, 0x00002018, 0x20200800, 0x40801001, 0x90440002, 0x00002018, 0x24200800, 0x40901001, 0x90000402, 0x00002030, 0x28200000, 0x00801101, 0x90040402, 0x00002030, 0x2c200000, 0x00901101, 0x90400402, 0x00002038, 0x28200800, 0x40801101, 0x90440402, 0x00002038, 0x2c200800, 0x40901101, 0x00000202, 0x00082000, 0x20000000, 0x00000005, 0x00040202, 0x00082000, 0x24000000, 0x00100005, 0x00400202, 0x00082008, 0x20000800, 0x40000005, 0x00440202, 0x00082008, 0x24000800, 0x40100005, 0x00000602, 0x00082020, 0x28000000, 0x00000105, 0x00040602, 0x00082020, 0x2c000000, 0x00100105, 0x00400602, 0x00082028, 0x28000800, 0x40000105, 0x00440602, 0x00082028, 0x2c000800, 0x40100105, 0x80000202, 0x00082010, 0x20000000, 0x00800005, 0x80040202, 0x00082010, 0x24000000, 0x00900005, 0x80400202, 0x00082018, 0x20000800, 0x40800005, 0x80440202, 0x00082018, 0x24000800, 0x40900005, 0x80000602, 0x00082030, 0x28000000, 0x00800105, 0x80040602, 0x00082030, 0x2c000000, 0x00900105, 0x80400602, 0x00082038, 0x28000800, 0x40800105, 0x80440602, 0x00082038, 0x2c000800, 0x40900105, 0x10000202, 0x00082000, 0x20200000, 0x00001005, 0x10040202, 0x00082000, 0x24200000, 0x00101005, 0x10400202, 0x00082008, 0x20200800, 0x40001005, 0x10440202, 0x00082008, 0x24200800, 0x40101005, 0x10000602, 0x00082020, 0x28200000, 0x00001105, 0x10040602, 0x00082020, 0x2c200000, 0x00101105, 0x10400602, 0x00082028, 0x28200800, 0x40001105, 0x10440602, 0x00082028, 0x2c200800, 0x40101105, 0x90000202, 0x00082010, 0x20200000, 0x00801005, 0x90040202, 0x00082010, 0x24200000, 0x00901005, 0x90400202, 0x00082018, 0x20200800, 0x40801005, 0x90440202, 0x00082018, 0x24200800, 0x40901005, 0x90000602, 0x00082030, 0x28200000, 0x00801105, 0x90040602, 0x00082030, 0x2c200000, 0x00901105, 0x90400602, 0x00082038, 0x28200800, 0x40801105, 0x90440602, 0x00082038, 0x2c200800, 0x40901105, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008, 0x00080000, 0x10000000, 0x02000000, 0x00000000, 0x00000080, 0x00001000, 0x02000000, 0x00000008, 0x00080080, 0x10001000, 0x00004000, 0x00000000, 0x00000040, 0x00040000, 0x00004000, 0x00000008, 0x00080040, 0x10040000, 0x02004000, 0x00000000, 0x000000c0, 0x00041000, 0x02004000, 0x00000008, 0x000800c0, 0x10041000, 0x00020000, 0x00008000, 0x08000000, 0x00200000, 0x00020000, 0x00008008, 0x08080000, 0x10200000, 0x02020000, 0x00008000, 0x08000080, 0x00201000, 0x02020000, 0x00008008, 0x08080080, 0x10201000, 0x00024000, 0x00008000, 0x08000040, 0x00240000, 0x00024000, 0x00008008, 0x08080040, 0x10240000, 0x02024000, 0x00008000, 0x080000c0, 0x00241000, 0x02024000, 0x00008008, 0x080800c0, 0x10241000, 
0x00000000, 0x01000000, 0x00002000, 0x00000020, 0x00000000, 0x01000008, 0x00082000, 0x10000020, 0x02000000, 0x01000000, 0x00002080, 0x00001020, 0x02000000, 0x01000008, 0x00082080, 0x10001020, 0x00004000, 0x01000000, 0x00002040, 0x00040020, 0x00004000, 0x01000008, 0x00082040, 0x10040020, 0x02004000, 0x01000000, 0x000020c0, 0x00041020, 0x02004000, 0x01000008, 0x000820c0, 0x10041020, 0x00020000, 0x01008000, 0x08002000, 0x00200020, 0x00020000, 0x01008008, 0x08082000, 0x10200020, 0x02020000, 0x01008000, 0x08002080, 0x00201020, 0x02020000, 0x01008008, 0x08082080, 0x10201020, 0x00024000, 0x01008000, 0x08002040, 0x00240020, 0x00024000, 0x01008008, 0x08082040, 0x10240020, 0x02024000, 0x01008000, 0x080020c0, 0x00241020, 0x02024000, 0x01008008, 0x080820c0, 0x10241020, 0x00000400, 0x04000000, 0x00100000, 0x00000004, 0x00000400, 0x04000008, 0x00180000, 0x10000004, 0x02000400, 0x04000000, 0x00100080, 0x00001004, 0x02000400, 0x04000008, 0x00180080, 0x10001004, 0x00004400, 0x04000000, 0x00100040, 0x00040004, 0x00004400, 0x04000008, 0x00180040, 0x10040004, 0x02004400, 0x04000000, 0x001000c0, 0x00041004, 0x02004400, 0x04000008, 0x001800c0, 0x10041004, 0x00020400, 0x04008000, 0x08100000, 0x00200004, 0x00020400, 0x04008008, 0x08180000, 0x10200004, 0x02020400, 0x04008000, 0x08100080, 0x00201004, 0x02020400, 0x04008008, 0x08180080, 0x10201004, 0x00024400, 0x04008000, 0x08100040, 0x00240004, 0x00024400, 0x04008008, 0x08180040, 0x10240004, 0x02024400, 0x04008000, 0x081000c0, 0x00241004, 0x02024400, 0x04008008, 0x081800c0, 0x10241004, 0x00000400, 0x05000000, 0x00102000, 0x00000024, 0x00000400, 0x05000008, 0x00182000, 0x10000024, 0x02000400, 0x05000000, 0x00102080, 0x00001024, 0x02000400, 0x05000008, 0x00182080, 0x10001024, 0x00004400, 0x05000000, 0x00102040, 0x00040024, 0x00004400, 0x05000008, 0x00182040, 0x10040024, 0x02004400, 0x05000000, 0x001020c0, 0x00041024, 0x02004400, 0x05000008, 0x001820c0, 0x10041024, 0x00020400, 0x05008000, 0x08102000, 0x00200024, 0x00020400, 0x05008008, 0x08182000, 0x10200024, 0x02020400, 0x05008000, 0x08102080, 0x00201024, 0x02020400, 0x05008008, 0x08182080, 0x10201024, 0x00024400, 0x05008000, 0x08102040, 0x00240024, 0x00024400, 0x05008008, 0x08182040, 0x10240024, 0x02024400, 0x05008000, 0x081020c0, 0x00241024, 0x02024400, 0x05008008, 0x081820c0, 0x10241024, 0x00000800, 0x00010000, 0x20000000, 0x00000010, 0x00000800, 0x00010008, 0x20080000, 0x10000010, 0x02000800, 0x00010000, 0x20000080, 0x00001010, 0x02000800, 0x00010008, 0x20080080, 0x10001010, 0x00004800, 0x00010000, 0x20000040, 0x00040010, 0x00004800, 0x00010008, 0x20080040, 0x10040010, 0x02004800, 0x00010000, 0x200000c0, 0x00041010, 0x02004800, 0x00010008, 0x200800c0, 0x10041010, 0x00020800, 0x00018000, 0x28000000, 0x00200010, 0x00020800, 0x00018008, 0x28080000, 0x10200010, 0x02020800, 0x00018000, 0x28000080, 0x00201010, 0x02020800, 0x00018008, 0x28080080, 0x10201010, 0x00024800, 0x00018000, 0x28000040, 0x00240010, 0x00024800, 0x00018008, 0x28080040, 0x10240010, 0x02024800, 0x00018000, 0x280000c0, 0x00241010, 0x02024800, 0x00018008, 0x280800c0, 0x10241010, 0x00000800, 0x01010000, 0x20002000, 0x00000030, 0x00000800, 0x01010008, 0x20082000, 0x10000030, 0x02000800, 0x01010000, 0x20002080, 0x00001030, 0x02000800, 0x01010008, 0x20082080, 0x10001030, 0x00004800, 0x01010000, 0x20002040, 0x00040030, 0x00004800, 0x01010008, 0x20082040, 0x10040030, 0x02004800, 0x01010000, 0x200020c0, 0x00041030, 0x02004800, 0x01010008, 0x200820c0, 0x10041030, 0x00020800, 0x01018000, 0x28002000, 0x00200030, 0x00020800, 0x01018008, 0x28082000, 0x10200030, 
0x02020800, 0x01018000, 0x28002080, 0x00201030, 0x02020800, 0x01018008, 0x28082080, 0x10201030, 0x00024800, 0x01018000, 0x28002040, 0x00240030, 0x00024800, 0x01018008, 0x28082040, 0x10240030, 0x02024800, 0x01018000, 0x280020c0, 0x00241030, 0x02024800, 0x01018008, 0x280820c0, 0x10241030, 0x00000c00, 0x04010000, 0x20100000, 0x00000014, 0x00000c00, 0x04010008, 0x20180000, 0x10000014, 0x02000c00, 0x04010000, 0x20100080, 0x00001014, 0x02000c00, 0x04010008, 0x20180080, 0x10001014, 0x00004c00, 0x04010000, 0x20100040, 0x00040014, 0x00004c00, 0x04010008, 0x20180040, 0x10040014, 0x02004c00, 0x04010000, 0x201000c0, 0x00041014, 0x02004c00, 0x04010008, 0x201800c0, 0x10041014, 0x00020c00, 0x04018000, 0x28100000, 0x00200014, 0x00020c00, 0x04018008, 0x28180000, 0x10200014, 0x02020c00, 0x04018000, 0x28100080, 0x00201014, 0x02020c00, 0x04018008, 0x28180080, 0x10201014, 0x00024c00, 0x04018000, 0x28100040, 0x00240014, 0x00024c00, 0x04018008, 0x28180040, 0x10240014, 0x02024c00, 0x04018000, 0x281000c0, 0x00241014, 0x02024c00, 0x04018008, 0x281800c0, 0x10241014, 0x00000c00, 0x05010000, 0x20102000, 0x00000034, 0x00000c00, 0x05010008, 0x20182000, 0x10000034, 0x02000c00, 0x05010000, 0x20102080, 0x00001034, 0x02000c00, 0x05010008, 0x20182080, 0x10001034, 0x00004c00, 0x05010000, 0x20102040, 0x00040034, 0x00004c00, 0x05010008, 0x20182040, 0x10040034, 0x02004c00, 0x05010000, 0x201020c0, 0x00041034, 0x02004c00, 0x05010008, 0x201820c0, 0x10041034, 0x00020c00, 0x05018000, 0x28102000, 0x00200034, 0x00020c00, 0x05018008, 0x28182000, 0x10200034, 0x02020c00, 0x05018000, 0x28102080, 0x00201034, 0x02020c00, 0x05018008, 0x28182080, 0x10201034, 0x00024c00, 0x05018000, 0x28102040, 0x00240034, 0x00024c00, 0x05018008, 0x28182040, 0x10240034, 0x02024c00, 0x05018000, 0x281020c0, 0x00241034, 0x02024c00, 0x05018008, 0x281820c0, 0x10241034 }; /* S-box lookup tables */ static const u32 S1[64] = { 0x01010400, 0x00000000, 0x00010000, 0x01010404, 0x01010004, 0x00010404, 0x00000004, 0x00010000, 0x00000400, 0x01010400, 0x01010404, 0x00000400, 0x01000404, 0x01010004, 0x01000000, 0x00000004, 0x00000404, 0x01000400, 0x01000400, 0x00010400, 0x00010400, 0x01010000, 0x01010000, 0x01000404, 0x00010004, 0x01000004, 0x01000004, 0x00010004, 0x00000000, 0x00000404, 0x00010404, 0x01000000, 0x00010000, 0x01010404, 0x00000004, 0x01010000, 0x01010400, 0x01000000, 0x01000000, 0x00000400, 0x01010004, 0x00010000, 0x00010400, 0x01000004, 0x00000400, 0x00000004, 0x01000404, 0x00010404, 0x01010404, 0x00010004, 0x01010000, 0x01000404, 0x01000004, 0x00000404, 0x00010404, 0x01010400, 0x00000404, 0x01000400, 0x01000400, 0x00000000, 0x00010004, 0x00010400, 0x00000000, 0x01010004 }; static const u32 S2[64] = { 0x80108020, 0x80008000, 0x00008000, 0x00108020, 0x00100000, 0x00000020, 0x80100020, 0x80008020, 0x80000020, 0x80108020, 0x80108000, 0x80000000, 0x80008000, 0x00100000, 0x00000020, 0x80100020, 0x00108000, 0x00100020, 0x80008020, 0x00000000, 0x80000000, 0x00008000, 0x00108020, 0x80100000, 0x00100020, 0x80000020, 0x00000000, 0x00108000, 0x00008020, 0x80108000, 0x80100000, 0x00008020, 0x00000000, 0x00108020, 0x80100020, 0x00100000, 0x80008020, 0x80100000, 0x80108000, 0x00008000, 0x80100000, 0x80008000, 0x00000020, 0x80108020, 0x00108020, 0x00000020, 0x00008000, 0x80000000, 0x00008020, 0x80108000, 0x00100000, 0x80000020, 0x00100020, 0x80008020, 0x80000020, 0x00100020, 0x00108000, 0x00000000, 0x80008000, 0x00008020, 0x80000000, 0x80100020, 0x80108020, 0x00108000 }; static const u32 S3[64] = { 0x00000208, 0x08020200, 0x00000000, 0x08020008, 0x08000200, 0x00000000, 
0x00020208, 0x08000200, 0x00020008, 0x08000008, 0x08000008, 0x00020000, 0x08020208, 0x00020008, 0x08020000, 0x00000208, 0x08000000, 0x00000008, 0x08020200, 0x00000200, 0x00020200, 0x08020000, 0x08020008, 0x00020208, 0x08000208, 0x00020200, 0x00020000, 0x08000208, 0x00000008, 0x08020208, 0x00000200, 0x08000000, 0x08020200, 0x08000000, 0x00020008, 0x00000208, 0x00020000, 0x08020200, 0x08000200, 0x00000000, 0x00000200, 0x00020008, 0x08020208, 0x08000200, 0x08000008, 0x00000200, 0x00000000, 0x08020008, 0x08000208, 0x00020000, 0x08000000, 0x08020208, 0x00000008, 0x00020208, 0x00020200, 0x08000008, 0x08020000, 0x08000208, 0x00000208, 0x08020000, 0x00020208, 0x00000008, 0x08020008, 0x00020200 }; static const u32 S4[64] = { 0x00802001, 0x00002081, 0x00002081, 0x00000080, 0x00802080, 0x00800081, 0x00800001, 0x00002001, 0x00000000, 0x00802000, 0x00802000, 0x00802081, 0x00000081, 0x00000000, 0x00800080, 0x00800001, 0x00000001, 0x00002000, 0x00800000, 0x00802001, 0x00000080, 0x00800000, 0x00002001, 0x00002080, 0x00800081, 0x00000001, 0x00002080, 0x00800080, 0x00002000, 0x00802080, 0x00802081, 0x00000081, 0x00800080, 0x00800001, 0x00802000, 0x00802081, 0x00000081, 0x00000000, 0x00000000, 0x00802000, 0x00002080, 0x00800080, 0x00800081, 0x00000001, 0x00802001, 0x00002081, 0x00002081, 0x00000080, 0x00802081, 0x00000081, 0x00000001, 0x00002000, 0x00800001, 0x00002001, 0x00802080, 0x00800081, 0x00002001, 0x00002080, 0x00800000, 0x00802001, 0x00000080, 0x00800000, 0x00002000, 0x00802080 }; static const u32 S5[64] = { 0x00000100, 0x02080100, 0x02080000, 0x42000100, 0x00080000, 0x00000100, 0x40000000, 0x02080000, 0x40080100, 0x00080000, 0x02000100, 0x40080100, 0x42000100, 0x42080000, 0x00080100, 0x40000000, 0x02000000, 0x40080000, 0x40080000, 0x00000000, 0x40000100, 0x42080100, 0x42080100, 0x02000100, 0x42080000, 0x40000100, 0x00000000, 0x42000000, 0x02080100, 0x02000000, 0x42000000, 0x00080100, 0x00080000, 0x42000100, 0x00000100, 0x02000000, 0x40000000, 0x02080000, 0x42000100, 0x40080100, 0x02000100, 0x40000000, 0x42080000, 0x02080100, 0x40080100, 0x00000100, 0x02000000, 0x42080000, 0x42080100, 0x00080100, 0x42000000, 0x42080100, 0x02080000, 0x00000000, 0x40080000, 0x42000000, 0x00080100, 0x02000100, 0x40000100, 0x00080000, 0x00000000, 0x40080000, 0x02080100, 0x40000100 }; static const u32 S6[64] = { 0x20000010, 0x20400000, 0x00004000, 0x20404010, 0x20400000, 0x00000010, 0x20404010, 0x00400000, 0x20004000, 0x00404010, 0x00400000, 0x20000010, 0x00400010, 0x20004000, 0x20000000, 0x00004010, 0x00000000, 0x00400010, 0x20004010, 0x00004000, 0x00404000, 0x20004010, 0x00000010, 0x20400010, 0x20400010, 0x00000000, 0x00404010, 0x20404000, 0x00004010, 0x00404000, 0x20404000, 0x20000000, 0x20004000, 0x00000010, 0x20400010, 0x00404000, 0x20404010, 0x00400000, 0x00004010, 0x20000010, 0x00400000, 0x20004000, 0x20000000, 0x00004010, 0x20000010, 0x20404010, 0x00404000, 0x20400000, 0x00404010, 0x20404000, 0x00000000, 0x20400010, 0x00000010, 0x00004000, 0x20400000, 0x00404010, 0x00004000, 0x00400010, 0x20004010, 0x00000000, 0x20404000, 0x20000000, 0x00400010, 0x20004010 }; static const u32 S7[64] = { 0x00200000, 0x04200002, 0x04000802, 0x00000000, 0x00000800, 0x04000802, 0x00200802, 0x04200800, 0x04200802, 0x00200000, 0x00000000, 0x04000002, 0x00000002, 0x04000000, 0x04200002, 0x00000802, 0x04000800, 0x00200802, 0x00200002, 0x04000800, 0x04000002, 0x04200000, 0x04200800, 0x00200002, 0x04200000, 0x00000800, 0x00000802, 0x04200802, 0x00200800, 0x00000002, 0x04000000, 0x00200800, 0x04000000, 0x00200800, 0x00200000, 0x04000802, 
0x04000802, 0x04200002, 0x04200002, 0x00000002, 0x00200002, 0x04000000, 0x04000800, 0x00200000, 0x04200800, 0x00000802, 0x00200802, 0x04200800, 0x00000802, 0x04000002, 0x04200802, 0x04200000, 0x00200800, 0x00000000, 0x00000002, 0x04200802, 0x00000000, 0x00200802, 0x04200000, 0x00000800, 0x04000002, 0x04000800, 0x00000800, 0x00200002 }; static const u32 S8[64] = { 0x10001040, 0x00001000, 0x00040000, 0x10041040, 0x10000000, 0x10001040, 0x00000040, 0x10000000, 0x00040040, 0x10040000, 0x10041040, 0x00041000, 0x10041000, 0x00041040, 0x00001000, 0x00000040, 0x10040000, 0x10000040, 0x10001000, 0x00001040, 0x00041000, 0x00040040, 0x10040040, 0x10041000, 0x00001040, 0x00000000, 0x00000000, 0x10040040, 0x10000040, 0x10001000, 0x00041040, 0x00040000, 0x00041040, 0x00040000, 0x10041000, 0x00001000, 0x00000040, 0x10040040, 0x00001000, 0x00041040, 0x10001000, 0x00000040, 0x10000040, 0x10040000, 0x10040040, 0x10000000, 0x00040000, 0x10001040, 0x00000000, 0x10041040, 0x00040040, 0x10000040, 0x10040000, 0x10001000, 0x10001040, 0x00000000, 0x10041040, 0x00041000, 0x00041000, 0x00001040, 0x00001040, 0x00040040, 0x10000000, 0x10041000 }; /* Encryption components: IP, FP, and round function */ #define IP(L, R, T) \ ROL(R, 4); \ T = L; \ L ^= R; \ L &= 0xf0f0f0f0; \ R ^= L; \ L ^= T; \ ROL(R, 12); \ T = L; \ L ^= R; \ L &= 0xffff0000; \ R ^= L; \ L ^= T; \ ROR(R, 14); \ T = L; \ L ^= R; \ L &= 0xcccccccc; \ R ^= L; \ L ^= T; \ ROL(R, 6); \ T = L; \ L ^= R; \ L &= 0xff00ff00; \ R ^= L; \ L ^= T; \ ROR(R, 7); \ T = L; \ L ^= R; \ L &= 0xaaaaaaaa; \ R ^= L; \ L ^= T; \ ROL(L, 1); #define FP(L, R, T) \ ROR(L, 1); \ T = L; \ L ^= R; \ L &= 0xaaaaaaaa; \ R ^= L; \ L ^= T; \ ROL(R, 7); \ T = L; \ L ^= R; \ L &= 0xff00ff00; \ R ^= L; \ L ^= T; \ ROR(R, 6); \ T = L; \ L ^= R; \ L &= 0xcccccccc; \ R ^= L; \ L ^= T; \ ROL(R, 14); \ T = L; \ L ^= R; \ L &= 0xffff0000; \ R ^= L; \ L ^= T; \ ROR(R, 12); \ T = L; \ L ^= R; \ L &= 0xf0f0f0f0; \ R ^= L; \ L ^= T; \ ROR(R, 4); #define ROUND(L, R, A, B, K, d) \ B = K[0]; A = K[1]; K += d; \ B ^= R; A ^= R; \ B &= 0x3f3f3f3f; ROR(A, 4); \ L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \ L ^= S6[0xff & (B >> 8)]; B >>= 16; \ L ^= S7[0xff & A]; \ L ^= S5[0xff & (A >> 8)]; A >>= 16; \ L ^= S4[0xff & B]; \ L ^= S2[0xff & (B >> 8)]; \ L ^= S3[0xff & A]; \ L ^= S1[0xff & (A >> 8)]; /* * PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved * tables of 128 elements. One set is for C_i and the other for D_i, while * the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i. * * After PC1 each of the variables a,b,c,d contains a 7 bit subset of C_i * or D_i in bits 7-1 (bit 0 being the least significant). */ #define T1(x) pt[2 * (x) + 0] #define T2(x) pt[2 * (x) + 1] #define T3(x) pt[2 * (x) + 2] #define T4(x) pt[2 * (x) + 3] #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) /* * Encryption key expansion * * RFC2451: Weak key checks SHOULD be performed. * * FIPS 74: * * Keys having duals are keys which produce all zeros, all ones, or * alternating zero-one patterns in the C and D registers after Permuted * Choice 1 has operated on the key. 
* */ static unsigned long des_ekey(u32 *pe, const u8 *k) { /* K&R: long is at least 32 bits */ unsigned long a, b, c, d, w; const u32 *pt = pc2; d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a); /* Check if first half is weak */ w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); /* Skip to next table set */ pt += 512; d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; /* Check if second half is weak */ w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a); /* Fixup: 2413 5768 -> 1357 2468 */ for (d = 0; d < 16; ++d) { a = pe[2 * d]; b = pe[2 * d + 1]; c = a ^ b; c &= 0xffff0000; a ^= c; b ^= c; ROL(b, 18); pe[2 * d] = a; pe[2 * d + 1] = b; } /* Zero if weak key */ return w; } int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen) { if (keylen != DES_KEY_SIZE) return -EINVAL; return des_ekey(ctx->expkey, key) ? 
0 : -ENOKEY; } EXPORT_SYMBOL_GPL(des_expand_key); /* * Decryption key expansion * * No weak key checking is performed, as this is only used by triple DES * */ static void dkey(u32 *pe, const u8 *k) { /* K&R: long is at least 32 bits */ unsigned long a, b, c, d; const u32 *pt = pc2; d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c]; pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b]; pe[15 * 2] = DES_PC2(b, c, d, a); /* Skip to next table set */ pt += 512; d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; pe[15 * 2 + 1] = DES_PC2(b, c, d, a); /* Fixup: 2413 5768 -> 1357 2468 */ for (d = 0; d < 16; ++d) { a = pe[2 * d]; b = pe[2 * d + 1]; c = a ^ b; c &= 0xffff0000; a ^= c; b ^= c; ROL(b, 18); pe[2 * d] = a; pe[2 * d + 1] = b; } } void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src) { const u32 *K = ctx->expkey; u32 L, R, A, B; int i; L = get_unaligned_le32(src); R = get_unaligned_le32(src + 4); IP(L, R, A); for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, 2); ROUND(R, L, A, B, K, 2); } FP(R, L, A); put_unaligned_le32(R, dst); put_unaligned_le32(L, dst + 4); } EXPORT_SYMBOL_GPL(des_encrypt); void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src) { const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2; u32 L, R, A, B; int i; L = get_unaligned_le32(src); R = get_unaligned_le32(src + 4); IP(L, R, A); for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, -2); ROUND(R, L, A, B, K, -2); } FP(R, L, A); put_unaligned_le32(R, dst); put_unaligned_le32(L, dst + 4); } EXPORT_SYMBOL_GPL(des_decrypt); int 
des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key, unsigned int keylen) { u32 *pe = ctx->expkey; int err; if (keylen != DES3_EDE_KEY_SIZE) return -EINVAL; err = des3_ede_verify_key(key, keylen, true); if (err && err != -ENOKEY) return err; des_ekey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; dkey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; des_ekey(pe, key); return err; } EXPORT_SYMBOL_GPL(des3_ede_expand_key); void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src) { const u32 *K = dctx->expkey; u32 L, R, A, B; int i; L = get_unaligned_le32(src); R = get_unaligned_le32(src + 4); IP(L, R, A); for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, 2); ROUND(R, L, A, B, K, 2); } for (i = 0; i < 8; i++) { ROUND(R, L, A, B, K, 2); ROUND(L, R, A, B, K, 2); } for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, 2); ROUND(R, L, A, B, K, 2); } FP(R, L, A); put_unaligned_le32(R, dst); put_unaligned_le32(L, dst + 4); } EXPORT_SYMBOL_GPL(des3_ede_encrypt); void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src) { const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2; u32 L, R, A, B; int i; L = get_unaligned_le32(src); R = get_unaligned_le32(src + 4); IP(L, R, A); for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, -2); ROUND(R, L, A, B, K, -2); } for (i = 0; i < 8; i++) { ROUND(R, L, A, B, K, -2); ROUND(L, R, A, B, K, -2); } for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, -2); ROUND(R, L, A, B, K, -2); } FP(R, L, A); put_unaligned_le32(R, dst); put_unaligned_le32(L, dst + 4); } EXPORT_SYMBOL_GPL(des3_ede_decrypt); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); MODULE_LICENSE("GPL");
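For context on how the exported helpers above fit together, here is a minimal in-kernel usage sketch (an assumption for illustration, not part of this file): it expands a single-DES key with des_expand_key(), which returns -ENOKEY when des_ekey() flags a weak key, then round-trips one 8-byte block through des_encrypt()/des_decrypt().

/*
 * Hypothetical demo module, not part of this file: exercise the DES library
 * helpers declared in <crypto/des.h> on a single block.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/des.h>

static int __init des_demo_init(void)
{
	static const u8 key[DES_KEY_SIZE] = {
		0x13, 0x34, 0x57, 0x79, 0x9b, 0xbc, 0xdf, 0xf1
	};
	u8 in[DES_BLOCK_SIZE] = "block!!";
	u8 out[DES_BLOCK_SIZE], back[DES_BLOCK_SIZE];
	struct des_ctx ctx;
	int err;

	/* -EINVAL for a bad key length, -ENOKEY for a weak key. */
	err = des_expand_key(&ctx, key, DES_KEY_SIZE);
	if (err)
		return err;

	des_encrypt(&ctx, out, in);
	des_decrypt(&ctx, back, out);
	pr_info("des demo: round-trip %s\n",
		memcmp(in, back, DES_BLOCK_SIZE) ? "failed" : "ok");
	return 0;
}

static void __exit des_demo_exit(void)
{
}

module_init(des_demo_init);
module_exit(des_demo_exit);
MODULE_LICENSE("GPL");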
2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 
3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 
4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 
4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 
5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
 * Copyright (c) 2008, Jouni Malinen <j@w1.fi>
 * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
 * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018 - 2024 Intel Corporation
 */

/*
 * TODO:
 * - Add TSF sync and fix IBSS beacon transmission by adding
 *   competition for "air time" at TBTT
 * - RX filtering based on filter configuration (data->rx_filter)
 */

#include
<linux/list.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <net/dst.h> #include <net/xfrm.h> #include <net/mac80211.h> #include <net/ieee80211_radiotap.h> #include <linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/ktime.h> #include <net/genetlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/rhashtable.h> #include <linux/nospec.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include "mac80211_hwsim.h" #define WARN_QUEUE 100 #define MAX_QUEUE 200 MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); MODULE_LICENSE("GPL"); static int radios = 2; module_param(radios, int, 0444); MODULE_PARM_DESC(radios, "Number of simulated radios"); static int channels = 1; module_param(channels, int, 0444); MODULE_PARM_DESC(channels, "Number of concurrent channels"); static bool paged_rx = false; module_param(paged_rx, bool, 0644); MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones"); static bool rctbl = false; module_param(rctbl, bool, 0444); MODULE_PARM_DESC(rctbl, "Handle rate control table"); static bool support_p2p_device = true; module_param(support_p2p_device, bool, 0444); MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type"); static bool mlo; module_param(mlo, bool, 0444); MODULE_PARM_DESC(mlo, "Support MLO"); static bool multi_radio; module_param(multi_radio, bool, 0444); MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy"); /** * enum hwsim_regtest - the type of regulatory tests we offer * * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed, * this is the default value. * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory * hint, only one driver regulatory hint will be sent as such the * secondary radios are expected to follow. * @HWSIM_REGTEST_DRIVER_REG_ALL: Used for testing the driver regulatory * request with all radios reporting the same regulatory domain. * @HWSIM_REGTEST_DIFF_COUNTRY: Used for testing the drivers calling * different regulatory domains requests. Expected behaviour is for * an intersection to occur but each device will still use their * respective regulatory requested domains. Subsequent radios will * use the resulting intersection. * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We accomplish * this by using a custom beacon-capable regulatory domain for the first * radio. All other device world roam. * @HWSIM_REGTEST_CUSTOM_WORLD: Used for testing the custom world regulatory * domain requests. All radios will adhere to this custom world regulatory * domain. * @HWSIM_REGTEST_CUSTOM_WORLD_2: Used for testing 2 custom world regulatory * domain requests. The first radio will adhere to the first custom world * regulatory domain, the second one to the second custom world regulatory * domain. All other devices will world roam. * @HWSIM_REGTEST_STRICT_FOLLOW: Used for testing strict regulatory domain * settings, only the first radio will send a regulatory domain request * and use strict settings. The rest of the radios are expected to follow. * @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain * settings. All radios will adhere to this. 
* @HWSIM_REGTEST_STRICT_AND_DRIVER_REG: Used for testing strict regulatory * domain settings, combined with secondary driver regulatory domain * settings. The first radio will get a strict regulatory domain setting * using the first driver regulatory request and the second radio will use * non-strict settings using the second driver regulatory request. All * other devices should follow the intersection created between the * first two. * @HWSIM_REGTEST_ALL: Used for testing every possible mix. You will need * at least 6 radios for a complete test. We will test in this order: * 1 - driver custom world regulatory domain * 2 - second custom world regulatory domain * 3 - first driver regulatory domain request * 4 - second driver regulatory domain request * 5 - strict regulatory domain settings using the third driver regulatory * domain request * 6 and on - should follow the intersection of the 3rd, 4rth and 5th radio * regulatory requests. * * These are the different values you can use for the regtest * module parameter. This is useful to help test world roaming * and the driver regulatory_hint() call and combinations of these. * If you want to do specific alpha2 regulatory domain tests simply * use the userspace regulatory request as that will be respected as * well without the need of this module parameter. This is designed * only for testing the driver regulatory request, world roaming * and all possible combinations. */ enum hwsim_regtest { HWSIM_REGTEST_DISABLED = 0, HWSIM_REGTEST_DRIVER_REG_FOLLOW = 1, HWSIM_REGTEST_DRIVER_REG_ALL = 2, HWSIM_REGTEST_DIFF_COUNTRY = 3, HWSIM_REGTEST_WORLD_ROAM = 4, HWSIM_REGTEST_CUSTOM_WORLD = 5, HWSIM_REGTEST_CUSTOM_WORLD_2 = 6, HWSIM_REGTEST_STRICT_FOLLOW = 7, HWSIM_REGTEST_STRICT_ALL = 8, HWSIM_REGTEST_STRICT_AND_DRIVER_REG = 9, HWSIM_REGTEST_ALL = 10, }; /* Set to one of the HWSIM_REGTEST_* values above */ static int regtest = HWSIM_REGTEST_DISABLED; module_param(regtest, int, 0444); MODULE_PARM_DESC(regtest, "The type of regulatory test we want to run"); static const char *hwsim_alpha2s[] = { "FI", "AL", "US", "DE", "JP", "AL", }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = { .n_reg_rules = 5, .alpha2 = "99", .reg_rules = { REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), REG_RULE(2484-10, 2484+10, 40, 0, 20, 0), REG_RULE(5150-10, 5240+10, 40, 0, 30, 0), REG_RULE(5745-10, 5825+10, 40, 0, 30, 0), REG_RULE(5855-10, 5925+10, 40, 0, 33, 0), } }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = { .n_reg_rules = 3, .alpha2 = "99", .reg_rules = { REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), REG_RULE(5725-10, 5850+10, 40, 0, 30, NL80211_RRF_NO_IR), REG_RULE(5855-10, 5925+10, 40, 0, 33, 0), } }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_03 = { .n_reg_rules = 6, .alpha2 = "99", .reg_rules = { REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0), REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0), REG_RULE(5150 - 10, 5240 + 10, 40, 0, 30, 0), REG_RULE(5745 - 10, 5825 + 10, 40, 0, 30, 0), REG_RULE(5855 - 10, 5925 + 10, 40, 0, 33, 0), REG_RULE(5955 - 10, 7125 + 10, 320, 0, 33, 0), } }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_04 = { .n_reg_rules = 6, .alpha2 = "99", .reg_rules = { REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0), REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0), REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, NL80211_RRF_AUTO_BW), REG_RULE(5260 - 10, 5320 + 10, 80, 0, 30, NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS | NL80211_RRF_AUTO_BW), REG_RULE(5500 - 10, 5720 + 10, 160, 0, 30, 
NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS), REG_RULE(5745 - 10, 5825 + 10, 80, 0, 30, 0), REG_RULE(5855 - 10, 5925 + 10, 80, 0, 33, 0), } }; static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = { &hwsim_world_regdom_custom_01, &hwsim_world_regdom_custom_02, &hwsim_world_regdom_custom_03, &hwsim_world_regdom_custom_04, }; struct hwsim_vif_priv { u32 magic; u32 skip_beacons[IEEE80211_MLD_MAX_NUM_LINKS]; u8 bssid[ETH_ALEN]; bool assoc; bool bcn_en; u16 aid; }; #define HWSIM_VIF_MAGIC 0x69537748 static inline void hwsim_check_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; WARN(vp->magic != HWSIM_VIF_MAGIC, "Invalid VIF (%p) magic %#x, %pM, %d/%d\n", vif, vp->magic, vif->addr, vif->type, vif->p2p); } static inline void hwsim_set_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; vp->magic = HWSIM_VIF_MAGIC; } static inline void hwsim_clear_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; vp->magic = 0; } struct hwsim_sta_priv { u32 magic; unsigned int last_link; u16 active_links_rx; }; #define HWSIM_STA_MAGIC 0x6d537749 static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; WARN_ON(sp->magic != HWSIM_STA_MAGIC); } static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; sp->magic = HWSIM_STA_MAGIC; } static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; sp->magic = 0; } struct hwsim_chanctx_priv { u32 magic; }; #define HWSIM_CHANCTX_MAGIC 0x6d53774a static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC); } static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; cp->magic = HWSIM_CHANCTX_MAGIC; } static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; cp->magic = 0; } static unsigned int hwsim_net_id; static DEFINE_IDA(hwsim_netgroup_ida); struct hwsim_net { int netgroup; u32 wmediumd; }; static inline int hwsim_net_get_netgroup(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); return hwsim_net->netgroup; } static inline int hwsim_net_set_netgroup(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); hwsim_net->netgroup = ida_alloc(&hwsim_netgroup_ida, GFP_KERNEL); return hwsim_net->netgroup >= 0 ? 
0 : -ENOMEM; } static inline u32 hwsim_net_get_wmediumd(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); return hwsim_net->wmediumd; } static inline void hwsim_net_set_wmediumd(struct net *net, u32 portid) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); hwsim_net->wmediumd = portid; } static struct class *hwsim_class; static struct net_device *hwsim_mon; /* global monitor netdev */ #define CHAN2G(_freq) { \ .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ } #define CHAN5G(_freq) { \ .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ } #define CHAN6G(_freq) { \ .band = NL80211_BAND_6GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ } static const struct ieee80211_channel hwsim_channels_2ghz[] = { CHAN2G(2412), /* Channel 1 */ CHAN2G(2417), /* Channel 2 */ CHAN2G(2422), /* Channel 3 */ CHAN2G(2427), /* Channel 4 */ CHAN2G(2432), /* Channel 5 */ CHAN2G(2437), /* Channel 6 */ CHAN2G(2442), /* Channel 7 */ CHAN2G(2447), /* Channel 8 */ CHAN2G(2452), /* Channel 9 */ CHAN2G(2457), /* Channel 10 */ CHAN2G(2462), /* Channel 11 */ CHAN2G(2467), /* Channel 12 */ CHAN2G(2472), /* Channel 13 */ CHAN2G(2484), /* Channel 14 */ }; static const struct ieee80211_channel hwsim_channels_5ghz[] = { CHAN5G(5180), /* Channel 36 */ CHAN5G(5200), /* Channel 40 */ CHAN5G(5220), /* Channel 44 */ CHAN5G(5240), /* Channel 48 */ CHAN5G(5260), /* Channel 52 */ CHAN5G(5280), /* Channel 56 */ CHAN5G(5300), /* Channel 60 */ CHAN5G(5320), /* Channel 64 */ CHAN5G(5500), /* Channel 100 */ CHAN5G(5520), /* Channel 104 */ CHAN5G(5540), /* Channel 108 */ CHAN5G(5560), /* Channel 112 */ CHAN5G(5580), /* Channel 116 */ CHAN5G(5600), /* Channel 120 */ CHAN5G(5620), /* Channel 124 */ CHAN5G(5640), /* Channel 128 */ CHAN5G(5660), /* Channel 132 */ CHAN5G(5680), /* Channel 136 */ CHAN5G(5700), /* Channel 140 */ CHAN5G(5745), /* Channel 149 */ CHAN5G(5765), /* Channel 153 */ CHAN5G(5785), /* Channel 157 */ CHAN5G(5805), /* Channel 161 */ CHAN5G(5825), /* Channel 165 */ CHAN5G(5845), /* Channel 169 */ CHAN5G(5855), /* Channel 171 */ CHAN5G(5860), /* Channel 172 */ CHAN5G(5865), /* Channel 173 */ CHAN5G(5870), /* Channel 174 */ CHAN5G(5875), /* Channel 175 */ CHAN5G(5880), /* Channel 176 */ CHAN5G(5885), /* Channel 177 */ CHAN5G(5890), /* Channel 178 */ CHAN5G(5895), /* Channel 179 */ CHAN5G(5900), /* Channel 180 */ CHAN5G(5905), /* Channel 181 */ CHAN5G(5910), /* Channel 182 */ CHAN5G(5915), /* Channel 183 */ CHAN5G(5920), /* Channel 184 */ CHAN5G(5925), /* Channel 185 */ }; static const struct ieee80211_channel hwsim_channels_6ghz[] = { CHAN6G(5955), /* Channel 1 */ CHAN6G(5975), /* Channel 5 */ CHAN6G(5995), /* Channel 9 */ CHAN6G(6015), /* Channel 13 */ CHAN6G(6035), /* Channel 17 */ CHAN6G(6055), /* Channel 21 */ CHAN6G(6075), /* Channel 25 */ CHAN6G(6095), /* Channel 29 */ CHAN6G(6115), /* Channel 33 */ CHAN6G(6135), /* Channel 37 */ CHAN6G(6155), /* Channel 41 */ CHAN6G(6175), /* Channel 45 */ CHAN6G(6195), /* Channel 49 */ CHAN6G(6215), /* Channel 53 */ CHAN6G(6235), /* Channel 57 */ CHAN6G(6255), /* Channel 61 */ CHAN6G(6275), /* Channel 65 */ CHAN6G(6295), /* Channel 69 */ CHAN6G(6315), /* Channel 73 */ CHAN6G(6335), /* Channel 77 */ CHAN6G(6355), /* Channel 81 */ CHAN6G(6375), /* Channel 85 */ CHAN6G(6395), /* Channel 89 */ CHAN6G(6415), /* Channel 93 */ CHAN6G(6435), /* Channel 97 */ CHAN6G(6455), /* Channel 181 */ CHAN6G(6475), /* Channel 105 */ CHAN6G(6495), /* Channel 109 */ CHAN6G(6515), /* Channel 113 */ 
CHAN6G(6535), /* Channel 117 */ CHAN6G(6555), /* Channel 121 */ CHAN6G(6575), /* Channel 125 */ CHAN6G(6595), /* Channel 129 */ CHAN6G(6615), /* Channel 133 */ CHAN6G(6635), /* Channel 137 */ CHAN6G(6655), /* Channel 141 */ CHAN6G(6675), /* Channel 145 */ CHAN6G(6695), /* Channel 149 */ CHAN6G(6715), /* Channel 153 */ CHAN6G(6735), /* Channel 157 */ CHAN6G(6755), /* Channel 161 */ CHAN6G(6775), /* Channel 165 */ CHAN6G(6795), /* Channel 169 */ CHAN6G(6815), /* Channel 173 */ CHAN6G(6835), /* Channel 177 */ CHAN6G(6855), /* Channel 181 */ CHAN6G(6875), /* Channel 185 */ CHAN6G(6895), /* Channel 189 */ CHAN6G(6915), /* Channel 193 */ CHAN6G(6935), /* Channel 197 */ CHAN6G(6955), /* Channel 201 */ CHAN6G(6975), /* Channel 205 */ CHAN6G(6995), /* Channel 209 */ CHAN6G(7015), /* Channel 213 */ CHAN6G(7035), /* Channel 217 */ CHAN6G(7055), /* Channel 221 */ CHAN6G(7075), /* Channel 225 */ CHAN6G(7095), /* Channel 229 */ CHAN6G(7115), /* Channel 233 */ }; #define NUM_S1G_CHANS_US 51 static struct ieee80211_channel hwsim_channels_s1g[NUM_S1G_CHANS_US]; static const struct ieee80211_sta_s1g_cap hwsim_s1g_cap = { .s1g = true, .cap = { S1G_CAP0_SGI_1MHZ | S1G_CAP0_SGI_2MHZ, 0, 0, S1G_CAP3_MAX_MPDU_LEN, 0, S1G_CAP5_AMPDU, 0, S1G_CAP7_DUP_1MHZ, S1G_CAP8_TWT_RESPOND | S1G_CAP8_TWT_REQUEST, 0}, .nss_mcs = { 0xfc | 1, /* MCS 7 for 1 SS */ /* RX Highest Supported Long GI Data Rate 0:7 */ 0, /* RX Highest Supported Long GI Data Rate 0:7 */ /* TX S1G MCS Map 0:6 */ 0xfa, /* TX S1G MCS Map :7 */ /* TX Highest Supported Long GI Data Rate 0:6 */ 0x80, /* TX Highest Supported Long GI Data Rate 7:8 */ /* Rx Single spatial stream and S1G-MCS Map for 1MHz */ /* Tx Single spatial stream and S1G-MCS Map for 1MHz */ 0 }, }; static void hwsim_init_s1g_channels(struct ieee80211_channel *chans) { int ch, freq; for (ch = 0; ch < NUM_S1G_CHANS_US; ch++) { freq = 902000 + (ch + 1) * 500; chans[ch].band = NL80211_BAND_S1GHZ; chans[ch].center_freq = KHZ_TO_MHZ(freq); chans[ch].freq_offset = freq % 1000; chans[ch].hw_value = ch + 1; } } static const struct ieee80211_rate hwsim_rates[] = { { .bitrate = 10 }, { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60 }, { .bitrate = 90 }, { .bitrate = 120 }, { .bitrate = 180 }, { .bitrate = 240 }, { .bitrate = 360 }, { .bitrate = 480 }, { .bitrate = 540 } }; #define DEFAULT_RX_RSSI -50 static const u32 hwsim_ciphers[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_CCMP_256, WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, }; #define OUI_QCA 0x001374 #define QCA_NL80211_SUBCMD_TEST 1 enum qca_nl80211_vendor_subcmds { QCA_WLAN_VENDOR_ATTR_TEST = 8, QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_TEST }; static const struct nla_policy hwsim_vendor_test_policy[QCA_WLAN_VENDOR_ATTR_MAX + 1] = { [QCA_WLAN_VENDOR_ATTR_MAX] = { .type = NLA_U32 }, }; static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct sk_buff *skb; struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1]; int err; u32 val; err = nla_parse_deprecated(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len, hwsim_vendor_test_policy, NULL); if (err) return err; if (!tb[QCA_WLAN_VENDOR_ATTR_TEST]) return -EINVAL; val = 
nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]); wiphy_dbg(wiphy, "%s: test=%u\n", __func__, val); /* Send a vendor event as a test. Note that this would not normally be * done within a command handler, but rather, based on some other * trigger. For simplicity, this command is used to trigger the event * here. * * event_idx = 0 (index in mac80211_hwsim_vendor_commands) */ skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL); if (skb) { /* skb_put() or nla_put() will fill up data within * NL80211_ATTR_VENDOR_DATA. */ /* Add vendor data */ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); /* Send the event - this will call nla_nest_end() */ cfg80211_vendor_event(skb, GFP_KERNEL); } /* Send a response to the command */ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10); if (!skb) return -ENOMEM; /* skb_put() or nla_put() will fill up data within * NL80211_ATTR_VENDOR_DATA */ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 2); return cfg80211_vendor_cmd_reply(skb); } static struct wiphy_vendor_command mac80211_hwsim_vendor_commands[] = { { .info = { .vendor_id = OUI_QCA, .subcmd = QCA_NL80211_SUBCMD_TEST }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV, .doit = mac80211_hwsim_vendor_cmd_test, .policy = hwsim_vendor_test_policy, .maxattr = QCA_WLAN_VENDOR_ATTR_MAX, } }; /* Advertise support vendor specific events */ static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = { { .vendor_id = OUI_QCA, .subcmd = 1 }, }; static DEFINE_SPINLOCK(hwsim_radio_lock); static LIST_HEAD(hwsim_radios); static struct rhashtable hwsim_radios_rht; static int hwsim_radio_idx; static int hwsim_radios_generation = 1; static struct platform_driver mac80211_hwsim_driver = { .driver = { .name = "mac80211_hwsim", }, }; struct mac80211_hwsim_link_data { u32 link_id; u64 beacon_int /* beacon interval in us */; struct hrtimer beacon_timer; }; struct mac80211_hwsim_data { struct list_head list; struct rhash_head rht; struct ieee80211_hw *hw; struct device *dev; struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; struct ieee80211_channel channels_6ghz[ARRAY_SIZE(hwsim_channels_6ghz)]; struct ieee80211_channel channels_s1g[ARRAY_SIZE(hwsim_channels_s1g)]; struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; struct ieee80211_iface_combination if_combination; struct ieee80211_iface_limit if_limits[3]; int n_if_limits; struct ieee80211_iface_combination if_combination_radio; struct wiphy_radio_freq_range radio_range[NUM_NL80211_BANDS]; struct wiphy_radio radio[NUM_NL80211_BANDS]; u32 ciphers[ARRAY_SIZE(hwsim_ciphers)]; struct mac_address addresses[2]; int channels, idx; bool use_chanctx; bool destroy_on_close; u32 portid; char alpha2[2]; const struct ieee80211_regdomain *regd; struct ieee80211_channel *tmp_chan; struct ieee80211_channel *roc_chan; u32 roc_duration; struct delayed_work roc_start; struct delayed_work roc_done; struct delayed_work hw_scan; struct cfg80211_scan_request *hw_scan_request; struct ieee80211_vif *hw_scan_vif; int scan_chan_idx; u8 scan_addr[ETH_ALEN]; struct { struct ieee80211_channel *channel; unsigned long next_start, start, end; } survey_data[ARRAY_SIZE(hwsim_channels_2ghz) + ARRAY_SIZE(hwsim_channels_5ghz) + ARRAY_SIZE(hwsim_channels_6ghz)]; struct ieee80211_channel *channel; enum nl80211_chan_width bw; unsigned int rx_filter; bool started, idle, scanning; struct mutex mutex; enum ps_mode { PS_DISABLED, PS_ENABLED, 
PS_AUTO_POLL, PS_MANUAL_POLL } ps; bool ps_poll_pending; struct dentry *debugfs; atomic_t pending_cookie; struct sk_buff_head pending; /* packets pending */ /* * Only radios in the same group can communicate together (the * channel has to match too). Each bit represents a group. A * radio can be in more than one group. */ u64 group; /* group shared by radios created in the same netns */ int netgroup; /* wmediumd portid responsible for netgroup of this radio */ u32 wmediumd; /* difference between this hw's clock and the real clock, in usecs */ s64 tsf_offset; s64 bcn_delta; /* absolute beacon transmission time. Used to cover up "tx" delay. */ u64 abs_bcn_ts; /* Stats */ u64 tx_pkts; u64 rx_pkts; u64 tx_bytes; u64 rx_bytes; u64 tx_dropped; u64 tx_failed; /* RSSI in rx status of the receiver */ int rx_rssi; /* only used when pmsr capability is supplied */ struct cfg80211_pmsr_capabilities pmsr_capa; struct cfg80211_pmsr_request *pmsr_request; struct wireless_dev *pmsr_request_wdev; struct mac80211_hwsim_link_data link_data[IEEE80211_MLD_MAX_NUM_LINKS]; }; static const struct rhashtable_params hwsim_rht_params = { .nelem_hint = 2, .automatic_shrinking = true, .key_len = ETH_ALEN, .key_offset = offsetof(struct mac80211_hwsim_data, addresses[1]), .head_offset = offsetof(struct mac80211_hwsim_data, rht), }; struct hwsim_radiotap_hdr { struct ieee80211_radiotap_header_fixed hdr; __le64 rt_tsft; u8 rt_flags; u8 rt_rate; __le16 rt_channel; __le16 rt_chbitmask; } __packed; struct hwsim_radiotap_ack_hdr { struct ieee80211_radiotap_header_fixed hdr; u8 rt_flags; u8 pad; __le16 rt_channel; __le16 rt_chbitmask; } __packed; static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) { return rhashtable_lookup_fast(&hwsim_radios_rht, addr, hwsim_rht_params); } /* MAC80211_HWSIM netlink family */ static struct genl_family hwsim_genl_family; enum hwsim_multicast_groups { HWSIM_MCGRP_CONFIG, }; static const struct genl_multicast_group hwsim_mcgrps[] = { [HWSIM_MCGRP_CONFIG] = { .name = "config", }, }; /* MAC80211_HWSIM netlink policy */ static const struct nla_policy hwsim_rate_info_policy[HWSIM_RATE_INFO_ATTR_MAX + 1] = { [HWSIM_RATE_INFO_ATTR_FLAGS] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_MCS] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_LEGACY] = { .type = NLA_U16 }, [HWSIM_RATE_INFO_ATTR_NSS] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_BW] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_HE_GI] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_HE_DCM] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_HE_RU_ALLOC] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_N_BOUNDED_CH] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_EHT_GI] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_EHT_RU_ALLOC] = { .type = NLA_U8 }, }; static const struct nla_policy hwsim_ftm_result_policy[NL80211_PMSR_FTM_RESP_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX] = { .type = NLA_U16 }, [NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_TX_RATE] = 
NLA_POLICY_NESTED(hwsim_rate_info_policy), [NL80211_PMSR_FTM_RESP_ATTR_RX_RATE] = NLA_POLICY_NESTED(hwsim_rate_info_policy), [NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_LCI] = { .type = NLA_STRING }, [NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC] = { .type = NLA_STRING }, }; static const struct nla_policy hwsim_pmsr_resp_type_policy[NL80211_PMSR_TYPE_MAX + 1] = { [NL80211_PMSR_TYPE_FTM] = NLA_POLICY_NESTED(hwsim_ftm_result_policy), }; static const struct nla_policy hwsim_pmsr_resp_policy[NL80211_PMSR_RESP_ATTR_MAX + 1] = { [NL80211_PMSR_RESP_ATTR_STATUS] = { .type = NLA_U32 }, [NL80211_PMSR_RESP_ATTR_HOST_TIME] = { .type = NLA_U64 }, [NL80211_PMSR_RESP_ATTR_AP_TSF] = { .type = NLA_U64 }, [NL80211_PMSR_RESP_ATTR_FINAL] = { .type = NLA_FLAG }, [NL80211_PMSR_RESP_ATTR_DATA] = NLA_POLICY_NESTED(hwsim_pmsr_resp_type_policy), }; static const struct nla_policy hwsim_pmsr_peer_result_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = { [NL80211_PMSR_PEER_ATTR_ADDR] = NLA_POLICY_ETH_ADDR_COMPAT, [NL80211_PMSR_PEER_ATTR_CHAN] = { .type = NLA_REJECT }, [NL80211_PMSR_PEER_ATTR_REQ] = { .type = NLA_REJECT }, [NL80211_PMSR_PEER_ATTR_RESP] = NLA_POLICY_NESTED(hwsim_pmsr_resp_policy), }; static const struct nla_policy hwsim_pmsr_peers_result_policy[NL80211_PMSR_ATTR_MAX + 1] = { [NL80211_PMSR_ATTR_MAX_PEERS] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_REPORT_AP_TSF] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_TYPE_CAPA] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_PEERS] = NLA_POLICY_NESTED_ARRAY(hwsim_pmsr_peer_result_policy), }; static const struct nla_policy hwsim_ftm_capa_policy[NL80211_PMSR_FTM_CAPA_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_CAPA_ATTR_ASAP] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST] = NLA_POLICY_MAX(NLA_U8, 31), [NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED] = { .type = NLA_FLAG }, }; static const struct nla_policy hwsim_pmsr_capa_type_policy[NL80211_PMSR_TYPE_MAX + 1] = { [NL80211_PMSR_TYPE_FTM] = NLA_POLICY_NESTED(hwsim_ftm_capa_policy), }; static const struct nla_policy hwsim_pmsr_capa_policy[NL80211_PMSR_ATTR_MAX + 1] = { [NL80211_PMSR_ATTR_MAX_PEERS] = { .type = NLA_U32 }, [NL80211_PMSR_ATTR_REPORT_AP_TSF] = { .type = NLA_FLAG }, [NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_FLAG }, [NL80211_PMSR_ATTR_TYPE_CAPA] = NLA_POLICY_NESTED(hwsim_pmsr_capa_type_policy), [NL80211_PMSR_ATTR_PEERS] = { .type = NLA_REJECT }, // only for request. 
}; static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { [HWSIM_ATTR_ADDR_RECEIVER] = NLA_POLICY_ETH_ADDR_COMPAT, [HWSIM_ATTR_ADDR_TRANSMITTER] = NLA_POLICY_ETH_ADDR_COMPAT, [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [HWSIM_ATTR_FLAGS] = { .type = NLA_U32 }, [HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 }, [HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 }, [HWSIM_ATTR_TX_INFO] = { .type = NLA_BINARY, .len = IEEE80211_TX_MAX_RATES * sizeof(struct hwsim_tx_rate)}, [HWSIM_ATTR_COOKIE] = { .type = NLA_U64 }, [HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 }, [HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 }, [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 }, [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 }, [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG }, [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG }, [HWSIM_ATTR_USE_CHANCTX] = { .type = NLA_FLAG }, [HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG }, [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING }, [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG }, [HWSIM_ATTR_FREQ] = { .type = NLA_U32 }, [HWSIM_ATTR_TX_INFO_FLAGS] = { .type = NLA_BINARY }, [HWSIM_ATTR_PERM_ADDR] = NLA_POLICY_ETH_ADDR_COMPAT, [HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 }, [HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY }, [HWSIM_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG }, [HWSIM_ATTR_PMSR_SUPPORT] = NLA_POLICY_NESTED(hwsim_pmsr_capa_policy), [HWSIM_ATTR_PMSR_RESULT] = NLA_POLICY_NESTED(hwsim_pmsr_peers_result_policy), [HWSIM_ATTR_MULTI_RADIO] = { .type = NLA_FLAG }, }; #if IS_REACHABLE(CONFIG_VIRTIO) /* MAC80211_HWSIM virtio queues */ static struct virtqueue *hwsim_vqs[HWSIM_NUM_VQS]; static bool hwsim_virtio_enabled; static DEFINE_SPINLOCK(hwsim_virtio_lock); static void hwsim_virtio_rx_work(struct work_struct *work); static DECLARE_WORK(hwsim_virtio_rx, hwsim_virtio_rx_work); static int hwsim_tx_virtio(struct mac80211_hwsim_data *data, struct sk_buff *skb) { struct scatterlist sg[1]; unsigned long flags; int err; spin_lock_irqsave(&hwsim_virtio_lock, flags); if (!hwsim_virtio_enabled) { err = -ENODEV; goto out_free; } sg_init_one(sg, skb->head, skb_end_offset(skb)); err = virtqueue_add_outbuf(hwsim_vqs[HWSIM_VQ_TX], sg, 1, skb, GFP_ATOMIC); if (err) goto out_free; virtqueue_kick(hwsim_vqs[HWSIM_VQ_TX]); spin_unlock_irqrestore(&hwsim_virtio_lock, flags); return 0; out_free: spin_unlock_irqrestore(&hwsim_virtio_lock, flags); nlmsg_free(skb); return err; } #else /* cause a linker error if this ends up being needed */ extern int hwsim_tx_virtio(struct mac80211_hwsim_data *data, struct sk_buff *skb); #define hwsim_virtio_enabled false #endif static int hwsim_get_chanwidth(enum nl80211_chan_width bw) { switch (bw) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: return 20; case NL80211_CHAN_WIDTH_40: return 40; case NL80211_CHAN_WIDTH_80: return 80; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: return 160; case NL80211_CHAN_WIDTH_320: return 320; case NL80211_CHAN_WIDTH_5: return 5; case NL80211_CHAN_WIDTH_10: return 10; case NL80211_CHAN_WIDTH_1: return 1; case NL80211_CHAN_WIDTH_2: return 2; case NL80211_CHAN_WIDTH_4: return 4; case NL80211_CHAN_WIDTH_8: return 8; case NL80211_CHAN_WIDTH_16: return 16; } return INT_MAX; } static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan); /* sysfs attributes */ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; struct 
hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_pspoll *pspoll; if (!vp->assoc) return; wiphy_dbg(data->hw->wiphy, "%s: send PS-Poll to %pM for aid %d\n", __func__, vp->bssid, vp->aid); skb = dev_alloc_skb(sizeof(*pspoll)); if (!skb) return; pspoll = skb_put(skb, sizeof(*pspoll)); pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM); pspoll->aid = cpu_to_le16(0xc000 | vp->aid); memcpy(pspoll->bssid, vp->bssid, ETH_ALEN); memcpy(pspoll->ta, mac, ETH_ALEN); rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan); rcu_read_unlock(); } static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct ieee80211_vif *vif, int ps) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *cb; if (!vp->assoc) return; wiphy_dbg(data->hw->wiphy, "%s: send data::nullfunc to %pM ps=%d\n", __func__, vp->bssid, ps); skb = dev_alloc_skb(sizeof(*hdr)); if (!skb) return; hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN); hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS | (ps ? IEEE80211_FCTL_PM : 0)); hdr->duration_id = cpu_to_le16(0); memcpy(hdr->addr1, vp->bssid, ETH_ALEN); memcpy(hdr->addr2, mac, ETH_ALEN); memcpy(hdr->addr3, vp->bssid, ETH_ALEN); cb = IEEE80211_SKB_CB(skb); cb->control.rates[0].count = 1; cb->control.rates[1].idx = -1; rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan); rcu_read_unlock(); } static void hwsim_send_nullfunc_ps(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; hwsim_send_nullfunc(data, mac, vif, 1); } static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; hwsim_send_nullfunc(data, mac, vif, 0); } static int hwsim_fops_ps_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->ps; return 0; } static int hwsim_fops_ps_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; enum ps_mode old_ps; if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL && val != PS_MANUAL_POLL) return -EINVAL; if (val == PS_MANUAL_POLL) { if (data->ps != PS_ENABLED) return -EINVAL; local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); local_bh_enable(); return 0; } old_ps = data->ps; data->ps = val; local_bh_disable(); if (old_ps == PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); } else if (old_ps != PS_DISABLED && val == PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_no_ps, data); } local_bh_enable(); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write, "%llu\n"); static int hwsim_write_simulate_radar(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; ieee80211_radar_detected(data->hw, NULL); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(hwsim_simulate_radar, NULL, hwsim_write_simulate_radar, "%llu\n"); static int hwsim_fops_group_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->group; return 0; } static int hwsim_fops_group_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; 
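/*
	 * Hedged illustration (an assumption about typical usage, not taken
	 * from this file): the value written here is the group bitmask
	 * described on the struct field above -- each bit names a group, a
	 * radio may belong to several groups, and two simulated radios can
	 * exchange frames only when their masks share at least one set bit
	 * (and their channels match). Assuming the usual debugfs layout for
	 * this driver, something like
	 *
	 *   echo 0x3 > /sys/kernel/debug/ieee80211/phy0/hwsim/group
	 *   echo 0x2 > /sys/kernel/debug/ieee80211/phy1/hwsim/group
	 *
	 * would leave phy0 and phy1 able to reach each other (they share
	 * bit 1), while a radio whose group is 0x4 would be isolated from
	 * both. The exact debugfs path is assumed here, not derived from
	 * this file.
	 */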
data->group = val; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group, hwsim_fops_group_read, hwsim_fops_group_write, "%llx\n"); static int hwsim_fops_rx_rssi_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->rx_rssi; return 0; } static int hwsim_fops_rx_rssi_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; int rssi = (int)val; if (rssi >= 0 || rssi < -100) return -EINVAL; data->rx_rssi = rssi; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_rx_rssi, hwsim_fops_rx_rssi_read, hwsim_fops_rx_rssi_write, "%lld\n"); static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, struct net_device *dev) { /* TODO: allow packet injection */ dev_kfree_skb(skb); return NETDEV_TX_OK; } static inline u64 mac80211_hwsim_get_tsf_raw(void) { return ktime_to_us(ktime_get_real()); } static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data) { u64 now = mac80211_hwsim_get_tsf_raw(); return cpu_to_le64(now + data->tsf_offset); } static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = hw->priv; return le64_to_cpu(__mac80211_hwsim_get_tsf(data)); } static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf) { struct mac80211_hwsim_data *data = hw->priv; u64 now = mac80211_hwsim_get_tsf(hw, vif); /* MLD not supported here */ u32 bcn_int = data->link_data[0].beacon_int; u64 delta = abs(tsf - now); /* adjust after beaconing with new timestamp at old TBTT */ if (tsf > now) { data->tsf_offset += delta; data->bcn_delta = do_div(delta, bcn_int); } else { data->tsf_offset -= delta; data->bcn_delta = -(s64)do_div(delta, bcn_int); } } static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, struct sk_buff *tx_skb, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv; struct sk_buff *skb; struct hwsim_radiotap_hdr *hdr; u16 flags, bitrate; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb); struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); if (!txrate) bitrate = 0; else bitrate = txrate->bitrate; if (!netif_running(hwsim_mon)) return; skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC); if (skb == NULL) return; hdr = skb_push(skb, sizeof(*hdr)); hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; hdr->hdr.it_pad = 0; hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | (1 << IEEE80211_RADIOTAP_RATE) | (1 << IEEE80211_RADIOTAP_TSFT) | (1 << IEEE80211_RADIOTAP_CHANNEL)); hdr->rt_tsft = __mac80211_hwsim_get_tsf(data); hdr->rt_flags = 0; hdr->rt_rate = bitrate / 5; hdr->rt_channel = cpu_to_le16(chan->center_freq); flags = IEEE80211_CHAN_2GHZ; if (txrate && txrate->flags & IEEE80211_RATE_ERP_G) flags |= IEEE80211_CHAN_OFDM; else flags |= IEEE80211_CHAN_CCK; hdr->rt_chbitmask = cpu_to_le16(flags); skb->dev = hwsim_mon; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, const u8 *addr) { struct sk_buff *skb; struct hwsim_radiotap_ack_hdr *hdr; u16 flags; struct ieee80211_hdr *hdr11; if (!netif_running(hwsim_mon)) return; skb = dev_alloc_skb(100); if (skb == NULL) return; hdr = skb_put(skb, sizeof(*hdr)); hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; hdr->hdr.it_pad = 0; hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = 
cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | (1 << IEEE80211_RADIOTAP_CHANNEL)); hdr->rt_flags = 0; hdr->pad = 0; hdr->rt_channel = cpu_to_le16(chan->center_freq); flags = IEEE80211_CHAN_2GHZ; hdr->rt_chbitmask = cpu_to_le16(flags); hdr11 = skb_put(skb, 10); hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); hdr11->duration_id = cpu_to_le16(0); memcpy(hdr11->addr1, addr, ETH_ALEN); skb->dev = hwsim_mon; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } struct mac80211_hwsim_addr_match_data { u8 addr[ETH_ALEN]; bool ret; }; static void mac80211_hwsim_addr_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { int i; struct mac80211_hwsim_addr_match_data *md = data; if (memcmp(mac, md->addr, ETH_ALEN) == 0) { md->ret = true; return; } /* Match the link address */ for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) { struct ieee80211_bss_conf *conf; conf = rcu_dereference(vif->link_conf[i]); if (!conf) continue; if (memcmp(conf->addr, md->addr, ETH_ALEN) == 0) { md->ret = true; return; } } } static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data, const u8 *addr) { struct mac80211_hwsim_addr_match_data md = { .ret = false, }; if (data->scanning && memcmp(addr, data->scan_addr, ETH_ALEN) == 0) return true; memcpy(md.addr, addr, ETH_ALEN); ieee80211_iterate_active_interfaces_atomic(data->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_addr_iter, &md); return md.ret; } static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, struct sk_buff *skb) { switch (data->ps) { case PS_DISABLED: return true; case PS_ENABLED: return false; case PS_AUTO_POLL: /* TODO: accept (some) Beacons by default and other frames only * if pending PS-Poll has been sent */ return true; case PS_MANUAL_POLL: /* Allow unicast frames to own address if there is a pending * PS-Poll */ if (data->ps_poll_pending && mac80211_hwsim_addr_match(data, skb->data + 4)) { data->ps_poll_pending = false; return true; } return false; } return true; } static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, struct sk_buff *skb, int portid) { struct net *net; bool found = false; int res = -ENOENT; rcu_read_lock(); for_each_net_rcu(net) { if (data->netgroup == hwsim_net_get_netgroup(net)) { res = genlmsg_unicast(net, skb, portid); found = true; break; } } rcu_read_unlock(); if (!found) nlmsg_free(skb); return res; } static void mac80211_hwsim_config_mac_nl(struct ieee80211_hw *hw, const u8 *addr, bool add) { struct mac80211_hwsim_data *data = hw->priv; u32 _portid = READ_ONCE(data->wmediumd); struct sk_buff *skb; void *msg_head; WARN_ON(!is_valid_ether_addr(addr)); if (!_portid && !hwsim_virtio_enabled) return; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, add ? 
			       HWSIM_CMD_ADD_MAC_ADDR : HWSIM_CMD_DEL_MAC_ADDR);
	if (!msg_head) {
		pr_debug("mac80211_hwsim: problem with msg_head\n");
		goto nla_put_failure;
	}

	if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
		    ETH_ALEN, data->addresses[1].addr))
		goto nla_put_failure;

	if (nla_put(skb, HWSIM_ATTR_ADDR_RECEIVER, ETH_ALEN, addr))
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);

	if (hwsim_virtio_enabled)
		hwsim_tx_virtio(data, skb);
	else
		hwsim_unicast_netgroup(data, skb, _portid);
	return;
nla_put_failure:
	nlmsg_free(skb);
}

static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
{
	u16 result = 0;

	if (rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
		result |= MAC80211_HWSIM_TX_RC_USE_RTS_CTS;
	if (rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		result |= MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT;
	if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		result |= MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE;
	if (rate->flags & IEEE80211_TX_RC_MCS)
		result |= MAC80211_HWSIM_TX_RC_MCS;
	if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		result |= MAC80211_HWSIM_TX_RC_GREEN_FIELD;
	if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		result |= MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH;
	if (rate->flags & IEEE80211_TX_RC_DUP_DATA)
		result |= MAC80211_HWSIM_TX_RC_DUP_DATA;
	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
		result |= MAC80211_HWSIM_TX_RC_SHORT_GI;
	if (rate->flags & IEEE80211_TX_RC_VHT_MCS)
		result |= MAC80211_HWSIM_TX_RC_VHT_MCS;
	if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
		result |= MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH;
	if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
		result |= MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH;

	return result;
}

static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
				       struct sk_buff *my_skb,
				       int dst_portid,
				       struct ieee80211_channel *channel)
{
	struct sk_buff *skb;
	struct mac80211_hwsim_data *data = hw->priv;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) my_skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(my_skb);
	void *msg_head;
	unsigned int hwsim_flags = 0;
	int i;
	struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
	struct hwsim_tx_rate_flag tx_attempts_flags[IEEE80211_TX_MAX_RATES];
	uintptr_t cookie;

	if (data->ps != PS_DISABLED)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);

	/* If the queue contains MAX_QUEUE skb's drop some */
	if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
		/* Dropping until WARN_QUEUE level */
		while (skb_queue_len(&data->pending) >= WARN_QUEUE) {
			ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
			data->tx_dropped++;
		}
	}

	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto nla_put_failure;

	msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
			       HWSIM_CMD_FRAME);
	if (msg_head == NULL) {
		pr_debug("mac80211_hwsim: problem with msg_head\n");
		goto nla_put_failure;
	}

	if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
		    ETH_ALEN, data->addresses[1].addr))
		goto nla_put_failure;

	/* We get the skb->data */
	if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
		goto nla_put_failure;

	/* We get the flags for this transmission, and we translate them to
	 * wmediumd flags
	 */
	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
		hwsim_flags |= HWSIM_TX_CTL_REQ_TX_STATUS;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		hwsim_flags |= HWSIM_TX_CTL_NO_ACK;

	if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, HWSIM_ATTR_FREQ, channel->center_freq))
		goto nla_put_failure;

	/* We get the tx control (rate and retries) info */
	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		tx_attempts[i].idx = info->status.rates[i].idx;
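		/* Mirror each mac80211 rate table entry into the
		 * hwsim_tx_rate / hwsim_tx_rate_flag arrays, which are sent
		 * to user space (e.g. wmediumd) below as the
		 * HWSIM_ATTR_TX_INFO and HWSIM_ATTR_TX_INFO_FLAGS attributes.
		 */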
		tx_attempts_flags[i].idx = info->status.rates[i].idx;
		tx_attempts[i].count = info->status.rates[i].count;
		tx_attempts_flags[i].flags =
				trans_tx_rate_flags_ieee2hwsim(
						&info->status.rates[i]);
	}

	if (nla_put(skb, HWSIM_ATTR_TX_INFO,
		    sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
		    tx_attempts))
		goto nla_put_failure;

	if (nla_put(skb, HWSIM_ATTR_TX_INFO_FLAGS,
		    sizeof(struct hwsim_tx_rate_flag) * IEEE80211_TX_MAX_RATES,
		    tx_attempts_flags))
		goto nla_put_failure;

	/* We create a cookie to identify this skb */
	cookie = atomic_inc_return(&data->pending_cookie);
	info->rate_driver_data[0] = (void *)cookie;
	if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);

	if (hwsim_virtio_enabled) {
		if (hwsim_tx_virtio(data, skb))
			goto err_free_txskb;
	} else {
		if (hwsim_unicast_netgroup(data, skb, dst_portid))
			goto err_free_txskb;
	}

	/* Enqueue the packet */
	skb_queue_tail(&data->pending, my_skb);
	data->tx_pkts++;
	data->tx_bytes += my_skb->len;
	return;

nla_put_failure:
	nlmsg_free(skb);
err_free_txskb:
	pr_debug("mac80211_hwsim: error occurred in %s\n", __func__);
	ieee80211_free_txskb(hw, my_skb);
	data->tx_failed++;
}

static bool hwsim_chans_compat(struct ieee80211_channel *c1,
			       struct ieee80211_channel *c2)
{
	if (!c1 || !c2)
		return false;

	return c1->center_freq == c2->center_freq;
}

struct tx_iter_data {
	struct ieee80211_channel *channel;
	bool receive;
};

static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
				   struct ieee80211_vif *vif)
{
	struct tx_iter_data *data = _data;
	int i;

	for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
		struct ieee80211_bss_conf *conf;
		struct ieee80211_chanctx_conf *chanctx;

		conf = rcu_dereference(vif->link_conf[i]);
		if (!conf)
			continue;

		chanctx = rcu_dereference(conf->chanctx_conf);
		if (!chanctx)
			continue;

		if (!hwsim_chans_compat(data->channel, chanctx->def.chan))
			continue;

		data->receive = true;
		return;
	}
}

static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
{
	/*
	 * To enable this code, #define the HWSIM_RADIOTAP_OUI,
	 * e.g. like this:
	 * #define HWSIM_RADIOTAP_OUI "\x02\x00\x00"
	 * (but you should use a valid OUI, not that)
	 *
	 * If anyone wants to 'donate' a radiotap OUI/subns code
	 * please send a patch removing this #ifdef and changing
	 * the values accordingly.
	 */
#ifdef HWSIM_RADIOTAP_OUI
	struct ieee80211_radiotap_vendor_tlv *rtap;
	static const char vendor_data[8] = "ABCDEFGH";

	// Make sure no padding is needed
	BUILD_BUG_ON(sizeof(vendor_data) % 4);
	/* this is the last radiotap info before the mac header, so
	 * skb_reset_mac_header for mac80211 to know the end of
	 * the radiotap TLV/beginning of the 802.11 header
	 */
	skb_reset_mac_header(skb);

	/*
	 * Note that this code requires the headroom in the SKB
	 * that was allocated earlier.
*/ rtap = skb_push(skb, sizeof(*rtap) + sizeof(vendor_data)); rtap->len = cpu_to_le16(sizeof(*rtap) - sizeof(struct ieee80211_radiotap_tlv) + sizeof(vendor_data)); rtap->type = cpu_to_le16(IEEE80211_RADIOTAP_VENDOR_NAMESPACE); rtap->content.oui[0] = HWSIM_RADIOTAP_OUI[0]; rtap->content.oui[1] = HWSIM_RADIOTAP_OUI[1]; rtap->content.oui[2] = HWSIM_RADIOTAP_OUI[2]; rtap->content.oui_subtype = 127; /* clear reserved field */ rtap->content.reserved = 0; rtap->content.vendor_type = 0; memcpy(rtap->content.data, vendor_data, sizeof(vendor_data)); IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; #endif } static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data, struct ieee80211_rx_status *rx_status, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; if (!ieee80211_has_morefrags(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1) && (ieee80211_is_mgmt(hdr->frame_control) || ieee80211_is_data(hdr->frame_control))) { struct ieee80211_sta *sta; unsigned int link_id; rcu_read_lock(); sta = ieee80211_find_sta_by_link_addrs(data->hw, hdr->addr2, hdr->addr1, &link_id); if (sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; if (ieee80211_has_pm(hdr->frame_control)) sp->active_links_rx &= ~BIT(link_id); else sp->active_links_rx |= BIT(link_id); rx_status->link_valid = true; rx_status->link_id = link_id; } rcu_read_unlock(); } memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); mac80211_hwsim_add_vendor_rtap(skb); data->rx_pkts++; data->rx_bytes += skb->len; ieee80211_rx_irqsafe(data->hw, skb); } static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv, *data2; bool ack = false; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_rx_status rx_status; u64 now; memset(&rx_status, 0, sizeof(rx_status)); rx_status.flag |= RX_FLAG_MACTIME_START; rx_status.freq = chan->center_freq; rx_status.freq_offset = chan->freq_offset ? 1 : 0; rx_status.band = chan->band; if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { rx_status.rate_idx = ieee80211_rate_get_vht_mcs(&info->control.rates[0]); rx_status.nss = ieee80211_rate_get_vht_nss(&info->control.rates[0]); rx_status.encoding = RX_ENC_VHT; } else { rx_status.rate_idx = info->control.rates[0].idx; if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) rx_status.encoding = RX_ENC_HT; } if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_40; else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_80; else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_160; else rx_status.bw = RATE_INFO_BW_20; if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; /* TODO: simulate optional packet loss */ rx_status.signal = data->rx_rssi; if (info->control.vif) rx_status.signal += info->control.vif->bss_conf.txpower; if (data->ps != PS_DISABLED) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); /* release the skb's source info */ skb_orphan(skb); skb_dst_drop(skb); skb->mark = 0; skb_ext_reset(skb); nf_reset_ct(skb); /* * Get absolute mactime here so all HWs RX at the "same time", and * absolute TX time for beacon mactime so the timestamp matches. 
* Giving beacons a different mactime than non-beacons looks messy, but * it helps the Toffset be exact and a ~10us mactime discrepancy * probably doesn't really matter. */ if (ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) { rx_status.boottime_ns = ktime_get_boottime_ns(); now = data->abs_bcn_ts; } else { now = mac80211_hwsim_get_tsf_raw(); } /* Copy skb to all enabled radios that are on the current frequency */ spin_lock(&hwsim_radio_lock); list_for_each_entry(data2, &hwsim_radios, list) { struct sk_buff *nskb; struct tx_iter_data tx_iter_data = { .receive = false, .channel = chan, }; if (data == data2) continue; if (!data2->started || (data2->idle && !data2->tmp_chan) || !hwsim_ps_rx_ok(data2, skb)) continue; if (!(data->group & data2->group)) continue; if (data->netgroup != data2->netgroup) continue; if (!hwsim_chans_compat(chan, data2->tmp_chan) && !hwsim_chans_compat(chan, data2->channel)) { ieee80211_iterate_active_interfaces_atomic( data2->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_tx_iter, &tx_iter_data); if (!tx_iter_data.receive) continue; } /* * reserve some space for our vendor and the normal * radiotap header, since we're copying anyway */ if (skb->len < PAGE_SIZE && paged_rx) { struct page *page = alloc_page(GFP_ATOMIC); if (!page) continue; nskb = dev_alloc_skb(128); if (!nskb) { __free_page(page); continue; } memcpy(page_address(page), skb->data, skb->len); skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len); } else { nskb = skb_copy(skb, GFP_ATOMIC); if (!nskb) continue; } if (mac80211_hwsim_addr_match(data2, hdr->addr1)) ack = true; rx_status.mactime = now + data2->tsf_offset; mac80211_hwsim_rx(data2, &rx_status, nskb); } spin_unlock(&hwsim_radio_lock); return ack; } static struct ieee80211_bss_conf * mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr, struct ieee80211_link_sta **link_sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; int i; if (!ieee80211_vif_is_mld(vif)) return &vif->bss_conf; WARN_ON(is_multicast_ether_addr(hdr->addr1)); if (WARN_ON_ONCE(!sta || !sta->valid_links)) return &vif->bss_conf; for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) { struct ieee80211_bss_conf *bss_conf; unsigned int link_id; /* round-robin the available link IDs */ link_id = (sp->last_link + i + 1) % ARRAY_SIZE(vif->link_conf); if (!(vif->active_links & BIT(link_id))) continue; if (!(sp->active_links_rx & BIT(link_id))) continue; *link_sta = rcu_dereference(sta->link[link_id]); if (!*link_sta) continue; bss_conf = rcu_dereference(vif->link_conf[link_id]); if (WARN_ON_ONCE(!bss_conf)) continue; /* can happen while switching links */ if (!rcu_access_pointer(bss_conf->chanctx_conf)) continue; sp->last_link = link_id; return bss_conf; } return NULL; } static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; bool ack; enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT; u32 _portid, i; if (WARN_ON(skb->len < 10)) { /* Should not happen; just a sanity check for addr1 use */ ieee80211_free_txskb(hw, skb); return; } if (!data->use_chanctx) { channel = data->channel; confbw = data->bw; } else if (txi->hw_queue == 4) { channel = data->tmp_chan; } else { u8 
link = u32_get_bits(IEEE80211_SKB_CB(skb)->control.flags, IEEE80211_TX_CTRL_MLO_LINK); struct ieee80211_vif *vif = txi->control.vif; struct ieee80211_link_sta *link_sta = NULL; struct ieee80211_sta *sta = control->sta; struct ieee80211_bss_conf *bss_conf; if (link != IEEE80211_LINK_UNSPECIFIED) { bss_conf = rcu_dereference(txi->control.vif->link_conf[link]); if (sta) link_sta = rcu_dereference(sta->link[link]); } else { bss_conf = mac80211_hwsim_select_tx_link(data, vif, sta, hdr, &link_sta); } if (unlikely(!bss_conf)) { /* if it's an MLO STA, it might have deactivated all * links temporarily - but we don't handle real PS in * this code yet, so just drop the frame in that case */ WARN(link != IEEE80211_LINK_UNSPECIFIED || !sta || !sta->mlo, "link:%d, sta:%pM, sta->mlo:%d\n", link, sta ? sta->addr : NULL, sta ? sta->mlo : -1); ieee80211_free_txskb(hw, skb); return; } if (sta && sta->mlo) { if (WARN_ON(!link_sta)) { ieee80211_free_txskb(hw, skb); return; } /* address translation to link addresses on TX */ ether_addr_copy(hdr->addr1, link_sta->addr); ether_addr_copy(hdr->addr2, bss_conf->addr); /* translate A3 only if it's the BSSID */ if (!ieee80211_has_tods(hdr->frame_control) && !ieee80211_has_fromds(hdr->frame_control)) { if (ether_addr_equal(hdr->addr3, sta->addr)) ether_addr_copy(hdr->addr3, link_sta->addr); else if (ether_addr_equal(hdr->addr3, vif->addr)) ether_addr_copy(hdr->addr3, bss_conf->addr); } /* no need to look at A4, if present it's SA */ } chanctx_conf = rcu_dereference(bss_conf->chanctx_conf); if (chanctx_conf) { channel = chanctx_conf->def.chan; confbw = chanctx_conf->def.width; } else { channel = NULL; } } if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) { ieee80211_free_txskb(hw, skb); return; } if (data->idle && !data->tmp_chan) { wiphy_dbg(hw->wiphy, "Trying to TX when idle - reject\n"); ieee80211_free_txskb(hw, skb); return; } if (txi->control.vif) hwsim_check_magic(txi->control.vif); if (control->sta) hwsim_check_sta_magic(control->sta); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) ieee80211_get_tx_rates(txi->control.vif, control->sta, skb, txi->control.rates, ARRAY_SIZE(txi->control.rates)); for (i = 0; i < ARRAY_SIZE(txi->control.rates); i++) { u16 rflags = txi->control.rates[i].flags; /* initialize to data->bw for 5/10 MHz handling */ enum nl80211_chan_width bw = data->bw; if (txi->control.rates[i].idx == -1) break; if (rflags & IEEE80211_TX_RC_40_MHZ_WIDTH) bw = NL80211_CHAN_WIDTH_40; else if (rflags & IEEE80211_TX_RC_80_MHZ_WIDTH) bw = NL80211_CHAN_WIDTH_80; else if (rflags & IEEE80211_TX_RC_160_MHZ_WIDTH) bw = NL80211_CHAN_WIDTH_160; if (WARN_ON(hwsim_get_chanwidth(bw) > hwsim_get_chanwidth(confbw))) return; } if (skb->len >= 24 + 8 && ieee80211_is_probe_resp(hdr->frame_control)) { /* fake header transmission time */ struct ieee80211_mgmt *mgmt; struct ieee80211_rate *txrate; /* TODO: get MCS */ int bitrate = 100; u64 ts; mgmt = (struct ieee80211_mgmt *)skb->data; txrate = ieee80211_get_tx_rate(hw, txi); if (txrate) bitrate = txrate->bitrate; ts = mac80211_hwsim_get_tsf_raw(); mgmt->u.probe_resp.timestamp = cpu_to_le64(ts + data->tsf_offset + 24 * 8 * 10 / bitrate); } mac80211_hwsim_monitor_rx(hw, skb, channel); /* wmediumd mode check */ _portid = READ_ONCE(data->wmediumd); if (_portid || hwsim_virtio_enabled) return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, channel); /* NO wmediumd detected, perfect medium simulation */ data->tx_pkts++; data->tx_bytes += skb->len; ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel); if (ack && 
	    skb->len >= 16)
		mac80211_hwsim_monitor_ack(channel, hdr->addr2);

	ieee80211_tx_info_clear_status(txi);

	/* frame was transmitted at most favorable rate at first attempt */
	txi->control.rates[0].count = 1;
	txi->control.rates[1].idx = -1;

	if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
		txi->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(hw, skb);
}

static int mac80211_hwsim_start(struct ieee80211_hw *hw)
{
	struct mac80211_hwsim_data *data = hw->priv;

	wiphy_dbg(hw->wiphy, "%s\n", __func__);
	data->started = true;
	return 0;
}

static void mac80211_hwsim_stop(struct ieee80211_hw *hw, bool suspend)
{
	struct mac80211_hwsim_data *data = hw->priv;
	int i;

	data->started = false;

	for (i = 0; i < ARRAY_SIZE(data->link_data); i++)
		hrtimer_cancel(&data->link_data[i].beacon_timer);

	while (!skb_queue_empty(&data->pending))
		ieee80211_free_txskb(hw, skb_dequeue(&data->pending));

	wiphy_dbg(hw->wiphy, "%s\n", __func__);
}

static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif)
{
	wiphy_dbg(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
		  __func__, ieee80211_vif_type_p2p(vif),
		  vif->addr);
	hwsim_set_magic(vif);

	if (vif->type != NL80211_IFTYPE_MONITOR)
		mac80211_hwsim_config_mac_nl(hw, vif->addr, true);

	vif->cab_queue = 0;
	vif->hw_queue[IEEE80211_AC_VO] = 0;
	vif->hw_queue[IEEE80211_AC_VI] = 1;
	vif->hw_queue[IEEE80211_AC_BE] = 2;
	vif->hw_queue[IEEE80211_AC_BK] = 3;

	return 0;
}

#ifdef CONFIG_MAC80211_DEBUGFS
static void mac80211_hwsim_link_add_debugfs(struct ieee80211_hw *hw,
					    struct ieee80211_vif *vif,
					    struct ieee80211_bss_conf *link_conf,
					    struct dentry *dir)
{
	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;

	debugfs_create_u32("skip_beacons", 0600, dir,
			   &vp->skip_beacons[link_conf->link_id]);
}
#endif

static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif,
					   enum nl80211_iftype newtype,
					   bool newp2p)
{
	newtype = ieee80211_iftype_p2p(newtype, newp2p);
	wiphy_dbg(hw->wiphy,
		  "%s (old type=%d, new type=%d, mac_addr=%pM)\n",
		  __func__, ieee80211_vif_type_p2p(vif),
		  newtype, vif->addr);
	hwsim_check_magic(vif);

	/*
	 * interface may change from non-AP to AP in
	 * which case this needs to be set up again
	 */
	vif->cab_queue = 0;

	return 0;
}

static void mac80211_hwsim_remove_interface(
	struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	wiphy_dbg(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
		  __func__, ieee80211_vif_type_p2p(vif),
		  vif->addr);
	hwsim_check_magic(vif);
	hwsim_clear_magic(vif);

	if (vif->type != NL80211_IFTYPE_MONITOR)
		mac80211_hwsim_config_mac_nl(hw, vif->addr, false);
}

static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
				    struct sk_buff *skb,
				    struct ieee80211_channel *chan)
{
	struct mac80211_hwsim_data *data = hw->priv;
	u32 _portid = READ_ONCE(data->wmediumd);

	if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
		struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);

		ieee80211_get_tx_rates(txi->control.vif, NULL, skb,
				       txi->control.rates,
				       ARRAY_SIZE(txi->control.rates));
	}

	mac80211_hwsim_monitor_rx(hw, skb, chan);

	if (_portid || hwsim_virtio_enabled)
		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, chan);

	data->tx_pkts++;
	data->tx_bytes += skb->len;
	mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
	dev_kfree_skb(skb);
}

static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf,
				       struct mac80211_hwsim_data *data,
				       struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct sk_buff *skb)
{
	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
	struct ieee80211_tx_info *info;
	struct ieee80211_rate *txrate;
	struct ieee80211_mgmt *mgmt;
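	/* The literal 100 used below as the default bitrate is in the same
	 * 100 kbps units as struct ieee80211_rate, i.e. it stands in for
	 * 10 Mbps when no TX rate is known; if mac80211 supplies a rate
	 * table, the value from ieee80211_get_tx_rate() replaces it when
	 * faking the header transmission time.
	 */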
/* TODO: get MCS */ int bitrate = 100; if (vp->skip_beacons[link_conf->link_id]) { vp->skip_beacons[link_conf->link_id]--; dev_kfree_skb(skb); return; } info = IEEE80211_SKB_CB(skb); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) ieee80211_get_tx_rates(vif, NULL, skb, info->control.rates, ARRAY_SIZE(info->control.rates)); txrate = ieee80211_get_tx_rate(hw, info); if (txrate) bitrate = txrate->bitrate; mgmt = (struct ieee80211_mgmt *) skb->data; /* fake header transmission time */ data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw(); if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { struct ieee80211_ext *ext = (void *) mgmt; ext->u.s1g_beacon.timestamp = cpu_to_le32(data->abs_bcn_ts + data->tsf_offset + 10 * 8 * 10 / bitrate); } else { mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts + data->tsf_offset + 24 * 8 * 10 / bitrate); } mac80211_hwsim_tx_frame(hw, skb, rcu_dereference(link_conf->chanctx_conf)->def.chan); } static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_link_data *link_data = arg; u32 link_id = link_data->link_id; struct ieee80211_bss_conf *link_conf; struct mac80211_hwsim_data *data = container_of(link_data, struct mac80211_hwsim_data, link_data[link_id]); struct ieee80211_hw *hw = data->hw; struct sk_buff *skb; hwsim_check_magic(vif); link_conf = rcu_dereference(vif->link_conf[link_id]); if (!link_conf) return; if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MESH_POINT && vif->type != NL80211_IFTYPE_ADHOC && vif->type != NL80211_IFTYPE_OCB) return; if (vif->mbssid_tx_vif && vif->mbssid_tx_vif != vif) return; if (vif->bss_conf.ema_ap) { struct ieee80211_ema_beacons *ema; u8 i = 0; ema = ieee80211_beacon_get_template_ema_list(hw, vif, link_id); if (!ema || !ema->cnt) return; for (i = 0; i < ema->cnt; i++) { __mac80211_hwsim_beacon_tx(link_conf, data, hw, vif, ema->bcn[i].skb); ema->bcn[i].skb = NULL; /* Already freed */ } ieee80211_beacon_free_ema_list(ema); } else { skb = ieee80211_beacon_get(hw, vif, link_id); if (!skb) return; __mac80211_hwsim_beacon_tx(link_conf, data, hw, vif, skb); } while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL) { mac80211_hwsim_tx_frame(hw, skb, rcu_dereference(link_conf->chanctx_conf)->def.chan); } if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id)) ieee80211_csa_finish(vif, link_id); if (link_conf->color_change_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id)) ieee80211_color_change_finish(vif, link_id); } static enum hrtimer_restart mac80211_hwsim_beacon(struct hrtimer *timer) { struct mac80211_hwsim_link_data *link_data = container_of(timer, struct mac80211_hwsim_link_data, beacon_timer); struct mac80211_hwsim_data *data = container_of(link_data, struct mac80211_hwsim_data, link_data[link_data->link_id]); struct ieee80211_hw *hw = data->hw; u64 bcn_int = link_data->beacon_int; if (!data->started) return HRTIMER_NORESTART; ieee80211_iterate_active_interfaces_atomic( hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_beacon_tx, link_data); /* beacon at new TBTT + beacon interval */ if (data->bcn_delta) { bcn_int -= data->bcn_delta; data->bcn_delta = 0; } hrtimer_forward_now(&link_data->beacon_timer, ns_to_ktime(bcn_int * NSEC_PER_USEC)); return HRTIMER_RESTART; } static const char * const hwsim_chanwidths[] = { [NL80211_CHAN_WIDTH_5] = "ht5", [NL80211_CHAN_WIDTH_10] = "ht10", [NL80211_CHAN_WIDTH_20_NOHT] = "noht", [NL80211_CHAN_WIDTH_20] = "ht20", [NL80211_CHAN_WIDTH_40] = "ht40", [NL80211_CHAN_WIDTH_80] = "vht80", 
	[NL80211_CHAN_WIDTH_80P80] = "vht80p80",
	[NL80211_CHAN_WIDTH_160] = "vht160",
	[NL80211_CHAN_WIDTH_1] = "1MHz",
	[NL80211_CHAN_WIDTH_2] = "2MHz",
	[NL80211_CHAN_WIDTH_4] = "4MHz",
	[NL80211_CHAN_WIDTH_8] = "8MHz",
	[NL80211_CHAN_WIDTH_16] = "16MHz",
	[NL80211_CHAN_WIDTH_320] = "eht320",
};

static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mac80211_hwsim_data *data = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;
	static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
		[IEEE80211_SMPS_AUTOMATIC] = "auto",
		[IEEE80211_SMPS_OFF] = "off",
		[IEEE80211_SMPS_STATIC] = "static",
		[IEEE80211_SMPS_DYNAMIC] = "dynamic",
	};
	int idx;

	if (conf->chandef.chan)
		wiphy_dbg(hw->wiphy,
			  "%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n",
			  __func__,
			  conf->chandef.chan->center_freq,
			  conf->chandef.center_freq1,
			  conf->chandef.center_freq2,
			  hwsim_chanwidths[conf->chandef.width],
			  !!(conf->flags & IEEE80211_CONF_IDLE),
			  !!(conf->flags & IEEE80211_CONF_PS),
			  smps_modes[conf->smps_mode]);
	else
		wiphy_dbg(hw->wiphy,
			  "%s (freq=0 idle=%d ps=%d smps=%s)\n",
			  __func__,
			  !!(conf->flags & IEEE80211_CONF_IDLE),
			  !!(conf->flags & IEEE80211_CONF_PS),
			  smps_modes[conf->smps_mode]);

	data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);

	WARN_ON(conf->chandef.chan && data->use_chanctx);

	mutex_lock(&data->mutex);
	if (data->scanning && conf->chandef.chan) {
		for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
			if (data->survey_data[idx].channel == data->channel) {
				data->survey_data[idx].start =
					data->survey_data[idx].next_start;
				data->survey_data[idx].end = jiffies;
				break;
			}
		}

		data->channel = conf->chandef.chan;
		data->bw = conf->chandef.width;

		for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) {
			if (data->survey_data[idx].channel &&
			    data->survey_data[idx].channel != data->channel)
				continue;
			data->survey_data[idx].channel = data->channel;
			data->survey_data[idx].next_start = jiffies;
			break;
		}
	} else {
		data->channel = conf->chandef.chan;
		data->bw = conf->chandef.width;
	}
	mutex_unlock(&data->mutex);

	for (idx = 0; idx < ARRAY_SIZE(data->link_data); idx++) {
		struct mac80211_hwsim_link_data *link_data =
			&data->link_data[idx];

		if (!data->started || !link_data->beacon_int) {
			hrtimer_cancel(&link_data->beacon_timer);
		} else if (!hrtimer_active(&link_data->beacon_timer)) {
			u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
			u32 bcn_int = link_data->beacon_int;
			u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);

			hrtimer_start(&link_data->beacon_timer,
				      ns_to_ktime(until_tbtt * NSEC_PER_USEC),
				      HRTIMER_MODE_REL_SOFT);
		}
	}

	return 0;
}

static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
					    unsigned int changed_flags,
					    unsigned int *total_flags,
					    u64 multicast)
{
	struct mac80211_hwsim_data *data = hw->priv;

	wiphy_dbg(hw->wiphy, "%s\n", __func__);

	data->rx_filter = 0;
	if (*total_flags & FIF_ALLMULTI)
		data->rx_filter |= FIF_ALLMULTI;
	if (*total_flags & FIF_MCAST_ACTION)
		data->rx_filter |= FIF_MCAST_ACTION;

	*total_flags = data->rx_filter;
}

static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	unsigned int *count = data;
	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;

	if (vp->bcn_en)
		(*count)++;
}

static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw,
					    struct ieee80211_vif *vif,
					    u64 changed)
{
	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;

	hwsim_check_magic(vif);
	wiphy_dbg(hw->wiphy, "%s(changed=0x%llx vif->addr=%pM)\n",
		  __func__, changed, vif->addr);

	if (changed & BSS_CHANGED_ASSOC) {
		wiphy_dbg(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
			  vif->cfg.assoc, vif->cfg.aid);
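		/* Cache the association state and AID on the vif private
		 * data: the power-save helpers earlier in this file
		 * (hwsim_send_ps_poll() and hwsim_send_nullfunc()) only emit
		 * frames for associated interfaces and build them from
		 * vp->bssid and vp->aid.
		 */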
vp->assoc = vif->cfg.assoc; vp->aid = vif->cfg.aid; } if (vif->type == NL80211_IFTYPE_STATION && changed & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM)) { u16 usable_links = ieee80211_vif_usable_links(vif); if (vif->active_links != usable_links) ieee80211_set_active_links_async(vif, usable_links); } } static void mac80211_hwsim_link_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct mac80211_hwsim_data *data = hw->priv; unsigned int link_id = info->link_id; struct mac80211_hwsim_link_data *link_data = &data->link_data[link_id]; hwsim_check_magic(vif); wiphy_dbg(hw->wiphy, "%s(changed=0x%llx vif->addr=%pM, link id %u)\n", __func__, (unsigned long long)changed, vif->addr, link_id); if (changed & BSS_CHANGED_BSSID) { wiphy_dbg(hw->wiphy, "%s: BSSID changed: %pM\n", __func__, info->bssid); memcpy(vp->bssid, info->bssid, ETH_ALEN); } if (changed & BSS_CHANGED_BEACON_ENABLED) { wiphy_dbg(hw->wiphy, " BCN EN: %d (BI=%u)\n", info->enable_beacon, info->beacon_int); vp->bcn_en = info->enable_beacon; if (data->started && !hrtimer_active(&link_data->beacon_timer) && info->enable_beacon) { u64 tsf, until_tbtt; u32 bcn_int; link_data->beacon_int = info->beacon_int * 1024; tsf = mac80211_hwsim_get_tsf(hw, vif); bcn_int = link_data->beacon_int; until_tbtt = bcn_int - do_div(tsf, bcn_int); hrtimer_start(&link_data->beacon_timer, ns_to_ktime(until_tbtt * NSEC_PER_USEC), HRTIMER_MODE_REL_SOFT); } else if (!info->enable_beacon) { unsigned int count = 0; ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_bcn_en_iter, &count); wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u", count); if (count == 0) { hrtimer_cancel(&link_data->beacon_timer); link_data->beacon_int = 0; } } } if (changed & BSS_CHANGED_ERP_CTS_PROT) { wiphy_dbg(hw->wiphy, " ERP_CTS_PROT: %d\n", info->use_cts_prot); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { wiphy_dbg(hw->wiphy, " ERP_PREAMBLE: %d\n", info->use_short_preamble); } if (changed & BSS_CHANGED_ERP_SLOT) { wiphy_dbg(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot); } if (changed & BSS_CHANGED_HT) { wiphy_dbg(hw->wiphy, " HT: op_mode=0x%x\n", info->ht_operation_mode); } if (changed & BSS_CHANGED_BASIC_RATES) { wiphy_dbg(hw->wiphy, " BASIC_RATES: 0x%llx\n", (unsigned long long) info->basic_rates); } if (changed & BSS_CHANGED_TXPOWER) wiphy_dbg(hw->wiphy, " TX Power: %d dBm\n", info->txpower); } static void mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta, u32 changed) { struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_sta *sta = link_sta->sta; u32 bw = U32_MAX; int link_id; rcu_read_lock(); for (link_id = 0; link_id < ARRAY_SIZE(vif->link_conf); link_id++) { enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT; struct ieee80211_bss_conf *vif_conf; link_sta = rcu_dereference(sta->link[link_id]); if (!link_sta) continue; switch (link_sta->bandwidth) { #define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break C(20); C(40); C(80); C(160); C(320); #undef C } if (!data->use_chanctx) { confbw = data->bw; } else { struct ieee80211_chanctx_conf *chanctx_conf; vif_conf = rcu_dereference(vif->link_conf[link_id]); if (WARN_ON(!vif_conf)) continue; chanctx_conf = rcu_dereference(vif_conf->chanctx_conf); if (!WARN_ON(!chanctx_conf)) confbw = chanctx_conf->def.width; } WARN(bw > hwsim_get_chanwidth(confbw), "intf %pM [link=%d]: bad 
STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n",
		     vif->addr, link_id, sta->addr, bw, sta->deflink.bandwidth,
		     hwsim_get_chanwidth(data->bw), data->bw);
	}
	rcu_read_unlock();
}

static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
{
	struct hwsim_sta_priv *sp = (void *)sta->drv_priv;

	hwsim_check_magic(vif);
	hwsim_set_sta_magic(sta);
	mac80211_hwsim_sta_rc_update(hw, vif, &sta->deflink, 0);

	if (sta->valid_links) {
		WARN(hweight16(sta->valid_links) > 1,
		     "expect to add STA with single link, have 0x%x\n",
		     sta->valid_links);
		sp->active_links_rx = sta->valid_links;
	}

	return 0;
}

static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_sta *sta)
{
	hwsim_check_magic(vif);
	hwsim_clear_sta_magic(sta);

	return 0;
}

static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *s